mirror of
https://github.com/hoernschen/dendrite.git
synced 2024-12-26 15:08:28 +00:00
Add logrus, prometheus client and matrix-org/util
This commit is contained in:
parent
41c6a3737e
commit
63d1bcd66a
313 changed files with 53958 additions and 1 deletions
100
vendor/manifest
vendored
100
vendor/manifest
vendored
|
@ -7,6 +7,19 @@
|
||||||
"revision": "574d3147eee384229bf96a5d12c207fe7b5234f3",
|
"revision": "574d3147eee384229bf96a5d12c207fe7b5234f3",
|
||||||
"branch": "master"
|
"branch": "master"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"importpath": "github.com/Sirupsen/logrus",
|
||||||
|
"repository": "https://github.com/Sirupsen/logrus",
|
||||||
|
"revision": "61e43dc76f7ee59a82bdf3d71033dc12bea4c77d",
|
||||||
|
"branch": "master"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"importpath": "github.com/beorn7/perks/quantile",
|
||||||
|
"repository": "https://github.com/beorn7/perks",
|
||||||
|
"revision": "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9",
|
||||||
|
"branch": "master",
|
||||||
|
"path": "/quantile"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"importpath": "github.com/davecgh/go-spew/spew",
|
"importpath": "github.com/davecgh/go-spew/spew",
|
||||||
"repository": "https://github.com/davecgh/go-spew",
|
"repository": "https://github.com/davecgh/go-spew",
|
||||||
|
@ -33,6 +46,13 @@
|
||||||
"revision": "44cc805cf13205b55f69e14bcb69867d1ae92f98",
|
"revision": "44cc805cf13205b55f69e14bcb69867d1ae92f98",
|
||||||
"branch": "master"
|
"branch": "master"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"importpath": "github.com/golang/protobuf/proto",
|
||||||
|
"repository": "https://github.com/golang/protobuf",
|
||||||
|
"revision": "8ee79997227bf9b34611aee7946ae64735e6fd93",
|
||||||
|
"branch": "master",
|
||||||
|
"path": "/proto"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"importpath": "github.com/golang/snappy",
|
"importpath": "github.com/golang/snappy",
|
||||||
"repository": "https://github.com/golang/snappy",
|
"repository": "https://github.com/golang/snappy",
|
||||||
|
@ -51,6 +71,19 @@
|
||||||
"revision": "a6657b2386e9b8be76484c08711b02c7cf867ead",
|
"revision": "a6657b2386e9b8be76484c08711b02c7cf867ead",
|
||||||
"branch": "master"
|
"branch": "master"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"importpath": "github.com/matrix-org/util",
|
||||||
|
"repository": "https://github.com/matrix-org/util",
|
||||||
|
"revision": "2aeb7e5d047ec74d65353f1579990a1e90af5bb0",
|
||||||
|
"branch": "master"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"importpath": "github.com/matttproud/golang_protobuf_extensions/pbutil",
|
||||||
|
"repository": "https://github.com/matttproud/golang_protobuf_extensions",
|
||||||
|
"revision": "c12348ce28de40eed0136aa2b644d0ee0650e56c",
|
||||||
|
"branch": "master",
|
||||||
|
"path": "/pbutil"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"importpath": "github.com/pierrec/lz4",
|
"importpath": "github.com/pierrec/lz4",
|
||||||
"repository": "https://github.com/pierrec/lz4",
|
"repository": "https://github.com/pierrec/lz4",
|
||||||
|
@ -64,17 +97,82 @@
|
||||||
"branch": "master",
|
"branch": "master",
|
||||||
"path": "/xxHash32"
|
"path": "/xxHash32"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"importpath": "github.com/prometheus/client_golang",
|
||||||
|
"repository": "https://github.com/prometheus/client_golang",
|
||||||
|
"revision": "c317fb74746eac4fc65fe3909195f4cf67c5562a",
|
||||||
|
"branch": "master"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"importpath": "github.com/prometheus/client_model/go",
|
||||||
|
"repository": "https://github.com/prometheus/client_model",
|
||||||
|
"revision": "fa8ad6fec33561be4280a8f0514318c79d7f6cb6",
|
||||||
|
"branch": "master",
|
||||||
|
"path": "/go"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"importpath": "github.com/prometheus/common/expfmt",
|
||||||
|
"repository": "https://github.com/prometheus/common",
|
||||||
|
"revision": "dd2f054febf4a6c00f2343686efb775948a8bff4",
|
||||||
|
"branch": "master",
|
||||||
|
"path": "/expfmt"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"importpath": "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg",
|
||||||
|
"repository": "https://github.com/prometheus/common",
|
||||||
|
"revision": "dd2f054febf4a6c00f2343686efb775948a8bff4",
|
||||||
|
"branch": "master",
|
||||||
|
"path": "/internal/bitbucket.org/ww/goautoneg"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"importpath": "github.com/prometheus/common/model",
|
||||||
|
"repository": "https://github.com/prometheus/common",
|
||||||
|
"revision": "dd2f054febf4a6c00f2343686efb775948a8bff4",
|
||||||
|
"branch": "master",
|
||||||
|
"path": "/model"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"importpath": "github.com/prometheus/procfs",
|
||||||
|
"repository": "https://github.com/prometheus/procfs",
|
||||||
|
"revision": "1878d9fbb537119d24b21ca07effd591627cd160",
|
||||||
|
"branch": "master"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"importpath": "github.com/rcrowley/go-metrics",
|
"importpath": "github.com/rcrowley/go-metrics",
|
||||||
"repository": "https://github.com/rcrowley/go-metrics",
|
"repository": "https://github.com/rcrowley/go-metrics",
|
||||||
"revision": "1f30fe9094a513ce4c700b9a54458bbb0c96996c",
|
"revision": "1f30fe9094a513ce4c700b9a54458bbb0c96996c",
|
||||||
"branch": "master"
|
"branch": "master"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"importpath": "github.com/sirupsen/logrus",
|
||||||
|
"repository": "https://github.com/sirupsen/logrus",
|
||||||
|
"revision": "61e43dc76f7ee59a82bdf3d71033dc12bea4c77d",
|
||||||
|
"branch": "master"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"importpath": "golang.org/x/net/context",
|
||||||
|
"repository": "https://go.googlesource.com/net",
|
||||||
|
"revision": "007e530097ad7f954752df63046b4036f98ba6a6",
|
||||||
|
"branch": "master",
|
||||||
|
"path": "/context"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"importpath": "gopkg.in/Shopify/sarama.v1",
|
"importpath": "gopkg.in/Shopify/sarama.v1",
|
||||||
"repository": "https://gopkg.in/Shopify/sarama.v1",
|
"repository": "https://gopkg.in/Shopify/sarama.v1",
|
||||||
"revision": "0fb560e5f7fbcaee2f75e3c34174320709f69944",
|
"revision": "0fb560e5f7fbcaee2f75e3c34174320709f69944",
|
||||||
"branch": "master"
|
"branch": "master"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"importpath": "gopkg.in/airbrake/gobrake.v2",
|
||||||
|
"repository": "https://gopkg.in/airbrake/gobrake.v2",
|
||||||
|
"revision": "668876711219e8b0206e2994bf0a59d889c775aa",
|
||||||
|
"branch": "master"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"importpath": "gopkg.in/gemnasium/logrus-airbrake-hook.v2",
|
||||||
|
"repository": "https://gopkg.in/gemnasium/logrus-airbrake-hook.v2",
|
||||||
|
"revision": "bfee1239d796830ca346767650cce5ba90d58c57",
|
||||||
|
"branch": "master"
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
66
vendor/src/github.com/Sirupsen/logrus/CHANGELOG.md
vendored
Normal file
66
vendor/src/github.com/Sirupsen/logrus/CHANGELOG.md
vendored
Normal file
|
@ -0,0 +1,66 @@
|
||||||
|
# 0.10.0
|
||||||
|
|
||||||
|
* feature: Add a test hook (#180)
|
||||||
|
* feature: `ParseLevel` is now case-insensitive (#326)
|
||||||
|
* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
|
||||||
|
* performance: avoid re-allocations on `WithFields` (#335)
|
||||||
|
|
||||||
|
# 0.9.0
|
||||||
|
|
||||||
|
* logrus/text_formatter: don't emit empty msg
|
||||||
|
* logrus/hooks/airbrake: move out of main repository
|
||||||
|
* logrus/hooks/sentry: move out of main repository
|
||||||
|
* logrus/hooks/papertrail: move out of main repository
|
||||||
|
* logrus/hooks/bugsnag: move out of main repository
|
||||||
|
* logrus/core: run tests with `-race`
|
||||||
|
* logrus/core: detect TTY based on `stderr`
|
||||||
|
* logrus/core: support `WithError` on logger
|
||||||
|
* logrus/core: Solaris support
|
||||||
|
|
||||||
|
# 0.8.7
|
||||||
|
|
||||||
|
* logrus/core: fix possible race (#216)
|
||||||
|
* logrus/doc: small typo fixes and doc improvements
|
||||||
|
|
||||||
|
|
||||||
|
# 0.8.6
|
||||||
|
|
||||||
|
* hooks/raven: allow passing an initialized client
|
||||||
|
|
||||||
|
# 0.8.5
|
||||||
|
|
||||||
|
* logrus/core: revert #208
|
||||||
|
|
||||||
|
# 0.8.4
|
||||||
|
|
||||||
|
* formatter/text: fix data race (#218)
|
||||||
|
|
||||||
|
# 0.8.3
|
||||||
|
|
||||||
|
* logrus/core: fix entry log level (#208)
|
||||||
|
* logrus/core: improve performance of text formatter by 40%
|
||||||
|
* logrus/core: expose `LevelHooks` type
|
||||||
|
* logrus/core: add support for DragonflyBSD and NetBSD
|
||||||
|
* formatter/text: print structs more verbosely
|
||||||
|
|
||||||
|
# 0.8.2
|
||||||
|
|
||||||
|
* logrus: fix more Fatal family functions
|
||||||
|
|
||||||
|
# 0.8.1
|
||||||
|
|
||||||
|
* logrus: fix not exiting on `Fatalf` and `Fatalln`
|
||||||
|
|
||||||
|
# 0.8.0
|
||||||
|
|
||||||
|
* logrus: defaults to stderr instead of stdout
|
||||||
|
* hooks/sentry: add special field for `*http.Request`
|
||||||
|
* formatter/text: ignore Windows for colors
|
||||||
|
|
||||||
|
# 0.7.3
|
||||||
|
|
||||||
|
* formatter/\*: allow configuration of timestamp layout
|
||||||
|
|
||||||
|
# 0.7.2
|
||||||
|
|
||||||
|
* formatter/text: Add configuration option for time format (#158)
|
21
vendor/src/github.com/Sirupsen/logrus/LICENSE
vendored
Normal file
21
vendor/src/github.com/Sirupsen/logrus/LICENSE
vendored
Normal file
|
@ -0,0 +1,21 @@
|
||||||
|
The MIT License (MIT)
|
||||||
|
|
||||||
|
Copyright (c) 2014 Simon Eskildsen
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in
|
||||||
|
all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||||
|
THE SOFTWARE.
|
433
vendor/src/github.com/Sirupsen/logrus/README.md
vendored
Normal file
433
vendor/src/github.com/Sirupsen/logrus/README.md
vendored
Normal file
|
@ -0,0 +1,433 @@
|
||||||
|
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/> [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/Sirupsen/logrus?status.svg)](https://godoc.org/github.com/Sirupsen/logrus)
|
||||||
|
|
||||||
|
**Seeing weird case-sensitive problems?** See [this
|
||||||
|
issue](https://github.com/sirupsen/logrus/issues/451#issuecomment-264332021).
|
||||||
|
This change has been reverted. I apologize for causing this. I greatly
|
||||||
|
underestimated the impact this would have. Logrus strives for stability and
|
||||||
|
backwards compatibility and failed to provide that.
|
||||||
|
|
||||||
|
Logrus is a structured logger for Go (golang), completely API compatible with
|
||||||
|
the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
|
||||||
|
yet stable (pre 1.0). Logrus itself is completely stable and has been used in
|
||||||
|
many large deployments. The core API is unlikely to change much but please
|
||||||
|
version control your Logrus to make sure you aren't fetching latest `master` on
|
||||||
|
every build.**
|
||||||
|
|
||||||
|
Nicely color-coded in development (when a TTY is attached, otherwise just
|
||||||
|
plain text):
|
||||||
|
|
||||||
|
![Colored](http://i.imgur.com/PY7qMwd.png)
|
||||||
|
|
||||||
|
With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
|
||||||
|
or Splunk:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
|
||||||
|
ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
|
||||||
|
|
||||||
|
{"level":"warning","msg":"The group's number increased tremendously!",
|
||||||
|
"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
|
||||||
|
|
||||||
|
{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
|
||||||
|
"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
|
||||||
|
|
||||||
|
{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
|
||||||
|
"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
|
||||||
|
|
||||||
|
{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
|
||||||
|
"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
|
||||||
|
```
|
||||||
|
|
||||||
|
With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
|
||||||
|
attached, the output is compatible with the
|
||||||
|
[logfmt](http://godoc.org/github.com/kr/logfmt) format:
|
||||||
|
|
||||||
|
```text
|
||||||
|
time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
|
||||||
|
time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
|
||||||
|
time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
|
||||||
|
time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
|
||||||
|
time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
|
||||||
|
time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
|
||||||
|
exit status 1
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Example
|
||||||
|
|
||||||
|
The simplest way to use Logrus is simply the package-level exported logger:
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
log "github.com/Sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
log.WithFields(log.Fields{
|
||||||
|
"animal": "walrus",
|
||||||
|
}).Info("A walrus appears")
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Note that it's completely api-compatible with the stdlib logger, so you can
|
||||||
|
replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
|
||||||
|
and you'll now have the flexibility of Logrus. You can customize it all you
|
||||||
|
want:
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
log "github.com/Sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
// Log as JSON instead of the default ASCII formatter.
|
||||||
|
log.SetFormatter(&log.JSONFormatter{})
|
||||||
|
|
||||||
|
// Output to stdout instead of the default stderr, could also be a file.
|
||||||
|
log.SetOutput(os.Stdout)
|
||||||
|
|
||||||
|
// Only log the warning severity or above.
|
||||||
|
log.SetLevel(log.WarnLevel)
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
log.WithFields(log.Fields{
|
||||||
|
"animal": "walrus",
|
||||||
|
"size": 10,
|
||||||
|
}).Info("A group of walrus emerges from the ocean")
|
||||||
|
|
||||||
|
log.WithFields(log.Fields{
|
||||||
|
"omg": true,
|
||||||
|
"number": 122,
|
||||||
|
}).Warn("The group's number increased tremendously!")
|
||||||
|
|
||||||
|
log.WithFields(log.Fields{
|
||||||
|
"omg": true,
|
||||||
|
"number": 100,
|
||||||
|
}).Fatal("The ice breaks!")
|
||||||
|
|
||||||
|
// A common pattern is to re-use fields between logging statements by re-using
|
||||||
|
// the logrus.Entry returned from WithFields()
|
||||||
|
contextLogger := log.WithFields(log.Fields{
|
||||||
|
"common": "this is a common field",
|
||||||
|
"other": "I also should be logged always",
|
||||||
|
})
|
||||||
|
|
||||||
|
contextLogger.Info("I'll be logged with common and other field")
|
||||||
|
contextLogger.Info("Me too")
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
For more advanced usage such as logging to multiple locations from the same
|
||||||
|
application, you can also create an instance of the `logrus` Logger:
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/Sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Create a new instance of the logger. You can have any number of instances.
|
||||||
|
var log = logrus.New()
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
// The API for setting attributes is a little different than the package level
|
||||||
|
// exported logger. See Godoc.
|
||||||
|
log.Out = os.Stderr
|
||||||
|
|
||||||
|
log.WithFields(logrus.Fields{
|
||||||
|
"animal": "walrus",
|
||||||
|
"size": 10,
|
||||||
|
}).Info("A group of walrus emerges from the ocean")
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Fields
|
||||||
|
|
||||||
|
Logrus encourages careful, structured logging though logging fields instead of
|
||||||
|
long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
|
||||||
|
to send event %s to topic %s with key %d")`, you should log the much more
|
||||||
|
discoverable:
|
||||||
|
|
||||||
|
```go
|
||||||
|
log.WithFields(log.Fields{
|
||||||
|
"event": event,
|
||||||
|
"topic": topic,
|
||||||
|
"key": key,
|
||||||
|
}).Fatal("Failed to send event")
|
||||||
|
```
|
||||||
|
|
||||||
|
We've found this API forces you to think about logging in a way that produces
|
||||||
|
much more useful logging messages. We've been in countless situations where just
|
||||||
|
a single added field to a log statement that was already there would've saved us
|
||||||
|
hours. The `WithFields` call is optional.
|
||||||
|
|
||||||
|
In general, with Logrus using any of the `printf`-family functions should be
|
||||||
|
seen as a hint you should add a field, however, you can still use the
|
||||||
|
`printf`-family functions with Logrus.
|
||||||
|
|
||||||
|
#### Hooks
|
||||||
|
|
||||||
|
You can add hooks for logging levels. For example to send errors to an exception
|
||||||
|
tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to
|
||||||
|
multiple places simultaneously, e.g. syslog.
|
||||||
|
|
||||||
|
Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
|
||||||
|
`init`:
|
||||||
|
|
||||||
|
```go
|
||||||
|
import (
|
||||||
|
log "github.com/Sirupsen/logrus"
|
||||||
|
"gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake"
|
||||||
|
logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
|
||||||
|
"log/syslog"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
|
||||||
|
// Use the Airbrake hook to report errors that have Error severity or above to
|
||||||
|
// an exception tracker. You can create custom hooks, see the Hooks section.
|
||||||
|
log.AddHook(airbrake.NewHook(123, "xyz", "production"))
|
||||||
|
|
||||||
|
hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
|
||||||
|
if err != nil {
|
||||||
|
log.Error("Unable to connect to local syslog daemon")
|
||||||
|
} else {
|
||||||
|
log.AddHook(hook)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md).
|
||||||
|
|
||||||
|
| Hook | Description |
|
||||||
|
| ----- | ----------- |
|
||||||
|
| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
|
||||||
|
| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
|
||||||
|
| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
|
||||||
|
| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
|
||||||
|
| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
|
||||||
|
| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
|
||||||
|
| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
|
||||||
|
| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
|
||||||
|
| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
|
||||||
|
| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
|
||||||
|
| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
|
||||||
|
| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
|
||||||
|
| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
|
||||||
|
| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
|
||||||
|
| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
|
||||||
|
| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
|
||||||
|
| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
|
||||||
|
| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
|
||||||
|
| [Influxus] (http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB] (http://influxdata.com/) |
|
||||||
|
| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
|
||||||
|
| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
|
||||||
|
| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
|
||||||
|
| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) |
|
||||||
|
| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) |
|
||||||
|
| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka |
|
||||||
|
| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
|
||||||
|
| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch|
|
||||||
|
| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
|
||||||
|
| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)|
|
||||||
|
| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
|
||||||
|
| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash |
|
||||||
|
| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) |
|
||||||
|
| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) |
|
||||||
|
| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) |
|
||||||
|
| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) |
|
||||||
|
|
||||||
|
|
||||||
|
#### Level logging
|
||||||
|
|
||||||
|
Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
|
||||||
|
|
||||||
|
```go
|
||||||
|
log.Debug("Useful debugging information.")
|
||||||
|
log.Info("Something noteworthy happened!")
|
||||||
|
log.Warn("You should probably take a look at this.")
|
||||||
|
log.Error("Something failed but I'm not quitting.")
|
||||||
|
// Calls os.Exit(1) after logging
|
||||||
|
log.Fatal("Bye.")
|
||||||
|
// Calls panic() after logging
|
||||||
|
log.Panic("I'm bailing.")
|
||||||
|
```
|
||||||
|
|
||||||
|
You can set the logging level on a `Logger`, then it will only log entries with
|
||||||
|
that severity or anything above it:
|
||||||
|
|
||||||
|
```go
|
||||||
|
// Will log anything that is info or above (warn, error, fatal, panic). Default.
|
||||||
|
log.SetLevel(log.InfoLevel)
|
||||||
|
```
|
||||||
|
|
||||||
|
It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
|
||||||
|
environment if your application has that.
|
||||||
|
|
||||||
|
#### Entries
|
||||||
|
|
||||||
|
Besides the fields added with `WithField` or `WithFields` some fields are
|
||||||
|
automatically added to all logging events:
|
||||||
|
|
||||||
|
1. `time`. The timestamp when the entry was created.
|
||||||
|
2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
|
||||||
|
the `AddFields` call. E.g. `Failed to send event.`
|
||||||
|
3. `level`. The logging level. E.g. `info`.
|
||||||
|
|
||||||
|
#### Environments
|
||||||
|
|
||||||
|
Logrus has no notion of environment.
|
||||||
|
|
||||||
|
If you wish for hooks and formatters to only be used in specific environments,
|
||||||
|
you should handle that yourself. For example, if your application has a global
|
||||||
|
variable `Environment`, which is a string representation of the environment you
|
||||||
|
could do:
|
||||||
|
|
||||||
|
```go
|
||||||
|
import (
|
||||||
|
log "github.com/Sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
init() {
|
||||||
|
// do something here to set environment depending on an environment variable
|
||||||
|
// or command-line flag
|
||||||
|
if Environment == "production" {
|
||||||
|
log.SetFormatter(&log.JSONFormatter{})
|
||||||
|
} else {
|
||||||
|
// The TextFormatter is default, you don't actually have to do this.
|
||||||
|
log.SetFormatter(&log.TextFormatter{})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
This configuration is how `logrus` was intended to be used, but JSON in
|
||||||
|
production is mostly only useful if you do log aggregation with tools like
|
||||||
|
Splunk or Logstash.
|
||||||
|
|
||||||
|
#### Formatters
|
||||||
|
|
||||||
|
The built-in logging formatters are:
|
||||||
|
|
||||||
|
* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
|
||||||
|
without colors.
|
||||||
|
* *Note:* to force colored output when there is no TTY, set the `ForceColors`
|
||||||
|
field to `true`. To force no colored output even if there is a TTY set the
|
||||||
|
`DisableColors` field to `true`
|
||||||
|
* `logrus.JSONFormatter`. Logs fields as JSON.
|
||||||
|
|
||||||
|
Third party logging formatters:
|
||||||
|
|
||||||
|
* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
|
||||||
|
* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
|
||||||
|
* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
|
||||||
|
|
||||||
|
You can define your formatter by implementing the `Formatter` interface,
|
||||||
|
requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
|
||||||
|
`Fields` type (`map[string]interface{}`) with all your fields as well as the
|
||||||
|
default ones (see Entries section above):
|
||||||
|
|
||||||
|
```go
|
||||||
|
type MyJSONFormatter struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
log.SetFormatter(new(MyJSONFormatter))
|
||||||
|
|
||||||
|
func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
|
||||||
|
// Note this doesn't include Time, Level and Message which are available on
|
||||||
|
// the Entry. Consult `godoc` on information about those fields or read the
|
||||||
|
// source of the official loggers.
|
||||||
|
serialized, err := json.Marshal(entry.Data)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
|
||||||
|
}
|
||||||
|
return append(serialized, '\n'), nil
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Logger as an `io.Writer`
|
||||||
|
|
||||||
|
Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
|
||||||
|
|
||||||
|
```go
|
||||||
|
w := logger.Writer()
|
||||||
|
defer w.Close()
|
||||||
|
|
||||||
|
srv := http.Server{
|
||||||
|
// create a stdlib log.Logger that writes to
|
||||||
|
// logrus.Logger.
|
||||||
|
ErrorLog: log.New(w, "", 0),
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Each line written to that writer will be printed the usual way, using formatters
|
||||||
|
and hooks. The level for those entries is `info`.
|
||||||
|
|
||||||
|
#### Rotation
|
||||||
|
|
||||||
|
Log rotation is not provided with Logrus. Log rotation should be done by an
|
||||||
|
external program (like `logrotate(8)`) that can compress and delete old log
|
||||||
|
entries. It should not be a feature of the application-level logger.
|
||||||
|
|
||||||
|
#### Tools
|
||||||
|
|
||||||
|
| Tool | Description |
|
||||||
|
| ---- | ----------- |
|
||||||
|
|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.|
|
||||||
|
|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper arround Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
|
||||||
|
|
||||||
|
#### Testing
|
||||||
|
|
||||||
|
Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
|
||||||
|
|
||||||
|
* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
|
||||||
|
* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
|
||||||
|
|
||||||
|
```go
|
||||||
|
logger, hook := NewNullLogger()
|
||||||
|
logger.Error("Hello error")
|
||||||
|
|
||||||
|
assert.Equal(1, len(hook.Entries))
|
||||||
|
assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
|
||||||
|
assert.Equal("Hello error", hook.LastEntry().Message)
|
||||||
|
|
||||||
|
hook.Reset()
|
||||||
|
assert.Nil(hook.LastEntry())
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Fatal handlers
|
||||||
|
|
||||||
|
Logrus can register one or more functions that will be called when any `fatal`
|
||||||
|
level message is logged. The registered handlers will be executed before
|
||||||
|
logrus performs a `os.Exit(1)`. This behavior may be helpful if callers need
|
||||||
|
to gracefully shutdown. Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover` a call to `os.Exit(1)` can not be intercepted.
|
||||||
|
|
||||||
|
```
|
||||||
|
...
|
||||||
|
handler := func() {
|
||||||
|
// gracefully shutdown something...
|
||||||
|
}
|
||||||
|
logrus.RegisterExitHandler(handler)
|
||||||
|
...
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Thread safety
|
||||||
|
|
||||||
|
By default Logger is protected by mutex for concurrent writes, this mutex is invoked when calling hooks and writing logs.
|
||||||
|
If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking.
|
||||||
|
|
||||||
|
Situation when locking is not needed includes:
|
||||||
|
|
||||||
|
* You have no hooks registered, or hooks calling is already thread-safe.
|
||||||
|
|
||||||
|
* Writing to logger.Out is already thread-safe, for example:
|
||||||
|
|
||||||
|
1) logger.Out is protected by locks.
|
||||||
|
|
||||||
|
2) logger.Out is a os.File handler opened with `O_APPEND` flag, and every write is smaller than 4k. (This allow multi-thread/multi-process writing)
|
||||||
|
|
||||||
|
(Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
|
64
vendor/src/github.com/Sirupsen/logrus/alt_exit.go
vendored
Normal file
64
vendor/src/github.com/Sirupsen/logrus/alt_exit.go
vendored
Normal file
|
@ -0,0 +1,64 @@
|
||||||
|
package logrus
|
||||||
|
|
||||||
|
// The following code was sourced and modified from the
|
||||||
|
// https://bitbucket.org/tebeka/atexit package governed by the following license:
|
||||||
|
//
|
||||||
|
// Copyright (c) 2012 Miki Tebeka <miki.tebeka@gmail.com>.
|
||||||
|
//
|
||||||
|
// Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||||
|
// this software and associated documentation files (the "Software"), to deal in
|
||||||
|
// the Software without restriction, including without limitation the rights to
|
||||||
|
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||||
|
// the Software, and to permit persons to whom the Software is furnished to do so,
|
||||||
|
// subject to the following conditions:
|
||||||
|
//
|
||||||
|
// The above copyright notice and this permission notice shall be included in all
|
||||||
|
// copies or substantial portions of the Software.
|
||||||
|
//
|
||||||
|
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||||
|
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||||
|
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||||
|
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||||
|
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
var handlers = []func(){}
|
||||||
|
|
||||||
|
func runHandler(handler func()) {
|
||||||
|
defer func() {
|
||||||
|
if err := recover(); err != nil {
|
||||||
|
fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
handler()
|
||||||
|
}
|
||||||
|
|
||||||
|
func runHandlers() {
|
||||||
|
for _, handler := range handlers {
|
||||||
|
runHandler(handler)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
|
||||||
|
func Exit(code int) {
|
||||||
|
runHandlers()
|
||||||
|
os.Exit(code)
|
||||||
|
}
|
||||||
|
|
||||||
|
// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke
|
||||||
|
// all handlers. The handlers will also be invoked when any Fatal log entry is
|
||||||
|
// made.
|
||||||
|
//
|
||||||
|
// This method is useful when a caller wishes to use logrus to log a fatal
|
||||||
|
// message but also needs to gracefully shutdown. An example usecase could be
|
||||||
|
// closing database connections, or sending a alert that the application is
|
||||||
|
// closing.
|
||||||
|
func RegisterExitHandler(handler func()) {
|
||||||
|
handlers = append(handlers, handler)
|
||||||
|
}
|
74
vendor/src/github.com/Sirupsen/logrus/alt_exit_test.go
vendored
Normal file
74
vendor/src/github.com/Sirupsen/logrus/alt_exit_test.go
vendored
Normal file
|
@ -0,0 +1,74 @@
|
||||||
|
package logrus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io/ioutil"
|
||||||
|
"os/exec"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestRegister(t *testing.T) {
|
||||||
|
current := len(handlers)
|
||||||
|
RegisterExitHandler(func() {})
|
||||||
|
if len(handlers) != current+1 {
|
||||||
|
t.Fatalf("can't add handler")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHandler(t *testing.T) {
|
||||||
|
gofile := "/tmp/testprog.go"
|
||||||
|
if err := ioutil.WriteFile(gofile, testprog, 0666); err != nil {
|
||||||
|
t.Fatalf("can't create go file")
|
||||||
|
}
|
||||||
|
|
||||||
|
outfile := "/tmp/testprog.out"
|
||||||
|
arg := time.Now().UTC().String()
|
||||||
|
err := exec.Command("go", "run", gofile, outfile, arg).Run()
|
||||||
|
if err == nil {
|
||||||
|
t.Fatalf("completed normally, should have failed")
|
||||||
|
}
|
||||||
|
|
||||||
|
data, err := ioutil.ReadFile(outfile)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("can't read output file %s", outfile)
|
||||||
|
}
|
||||||
|
|
||||||
|
if string(data) != arg {
|
||||||
|
t.Fatalf("bad data")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var testprog = []byte(`
|
||||||
|
// Test program for atexit, gets output file and data as arguments and writes
|
||||||
|
// data to output file in atexit handler.
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/Sirupsen/logrus"
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
)
|
||||||
|
|
||||||
|
var outfile = ""
|
||||||
|
var data = ""
|
||||||
|
|
||||||
|
func handler() {
|
||||||
|
ioutil.WriteFile(outfile, []byte(data), 0666)
|
||||||
|
}
|
||||||
|
|
||||||
|
func badHandler() {
|
||||||
|
n := 0
|
||||||
|
fmt.Println(1/n)
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
flag.Parse()
|
||||||
|
outfile = flag.Arg(0)
|
||||||
|
data = flag.Arg(1)
|
||||||
|
|
||||||
|
logrus.RegisterExitHandler(handler)
|
||||||
|
logrus.RegisterExitHandler(badHandler)
|
||||||
|
logrus.Fatal("Bye bye")
|
||||||
|
}
|
||||||
|
`)
|
26
vendor/src/github.com/Sirupsen/logrus/doc.go
vendored
Normal file
26
vendor/src/github.com/Sirupsen/logrus/doc.go
vendored
Normal file
|
@ -0,0 +1,26 @@
|
||||||
|
/*
|
||||||
|
Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
|
||||||
|
|
||||||
|
|
||||||
|
The simplest way to use Logrus is simply the package-level exported logger:
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
log "github.com/Sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
log.WithFields(log.Fields{
|
||||||
|
"animal": "walrus",
|
||||||
|
"number": 1,
|
||||||
|
"size": 10,
|
||||||
|
}).Info("A walrus appears")
|
||||||
|
}
|
||||||
|
|
||||||
|
Output:
|
||||||
|
time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
|
||||||
|
|
||||||
|
For a full guide visit https://github.com/Sirupsen/logrus
|
||||||
|
*/
|
||||||
|
package logrus
|
275
vendor/src/github.com/Sirupsen/logrus/entry.go
vendored
Normal file
275
vendor/src/github.com/Sirupsen/logrus/entry.go
vendored
Normal file
|
@ -0,0 +1,275 @@
|
||||||
|
package logrus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
var bufferPool *sync.Pool
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
bufferPool = &sync.Pool{
|
||||||
|
New: func() interface{} {
|
||||||
|
return new(bytes.Buffer)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Defines the key when adding errors using WithError.
|
||||||
|
var ErrorKey = "error"
|
||||||
|
|
||||||
|
// An entry is the final or intermediate Logrus logging entry. It contains all
|
||||||
|
// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
|
||||||
|
// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
|
||||||
|
// passed around as much as you wish to avoid field duplication.
|
||||||
|
type Entry struct {
|
||||||
|
Logger *Logger
|
||||||
|
|
||||||
|
// Contains all the fields set by the user.
|
||||||
|
Data Fields
|
||||||
|
|
||||||
|
// Time at which the log entry was created
|
||||||
|
Time time.Time
|
||||||
|
|
||||||
|
// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
|
||||||
|
Level Level
|
||||||
|
|
||||||
|
// Message passed to Debug, Info, Warn, Error, Fatal or Panic
|
||||||
|
Message string
|
||||||
|
|
||||||
|
// When formatter is called in entry.log(), an Buffer may be set to entry
|
||||||
|
Buffer *bytes.Buffer
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewEntry(logger *Logger) *Entry {
|
||||||
|
return &Entry{
|
||||||
|
Logger: logger,
|
||||||
|
// Default is three fields, give a little extra room
|
||||||
|
Data: make(Fields, 5),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns the string representation from the reader and ultimately the
|
||||||
|
// formatter.
|
||||||
|
func (entry *Entry) String() (string, error) {
|
||||||
|
serialized, err := entry.Logger.Formatter.Format(entry)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
str := string(serialized)
|
||||||
|
return str, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add an error as single field (using the key defined in ErrorKey) to the Entry.
|
||||||
|
func (entry *Entry) WithError(err error) *Entry {
|
||||||
|
return entry.WithField(ErrorKey, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add a single field to the Entry.
|
||||||
|
func (entry *Entry) WithField(key string, value interface{}) *Entry {
|
||||||
|
return entry.WithFields(Fields{key: value})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add a map of fields to the Entry.
|
||||||
|
func (entry *Entry) WithFields(fields Fields) *Entry {
|
||||||
|
data := make(Fields, len(entry.Data)+len(fields))
|
||||||
|
for k, v := range entry.Data {
|
||||||
|
data[k] = v
|
||||||
|
}
|
||||||
|
for k, v := range fields {
|
||||||
|
data[k] = v
|
||||||
|
}
|
||||||
|
return &Entry{Logger: entry.Logger, Data: data}
|
||||||
|
}
|
||||||
|
|
||||||
|
// This function is not declared with a pointer value because otherwise
|
||||||
|
// race conditions will occur when using multiple goroutines
|
||||||
|
func (entry Entry) log(level Level, msg string) {
|
||||||
|
var buffer *bytes.Buffer
|
||||||
|
entry.Time = time.Now()
|
||||||
|
entry.Level = level
|
||||||
|
entry.Message = msg
|
||||||
|
|
||||||
|
if err := entry.Logger.Hooks.Fire(level, &entry); err != nil {
|
||||||
|
entry.Logger.mu.Lock()
|
||||||
|
fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
|
||||||
|
entry.Logger.mu.Unlock()
|
||||||
|
}
|
||||||
|
buffer = bufferPool.Get().(*bytes.Buffer)
|
||||||
|
buffer.Reset()
|
||||||
|
defer bufferPool.Put(buffer)
|
||||||
|
entry.Buffer = buffer
|
||||||
|
serialized, err := entry.Logger.Formatter.Format(&entry)
|
||||||
|
entry.Buffer = nil
|
||||||
|
if err != nil {
|
||||||
|
entry.Logger.mu.Lock()
|
||||||
|
fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
|
||||||
|
entry.Logger.mu.Unlock()
|
||||||
|
} else {
|
||||||
|
entry.Logger.mu.Lock()
|
||||||
|
_, err = entry.Logger.Out.Write(serialized)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
|
||||||
|
}
|
||||||
|
entry.Logger.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// To avoid Entry#log() returning a value that only would make sense for
|
||||||
|
// panic() to use in Entry#Panic(), we avoid the allocation by checking
|
||||||
|
// directly here.
|
||||||
|
if level <= PanicLevel {
|
||||||
|
panic(&entry)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Debug(args ...interface{}) {
|
||||||
|
if entry.Logger.Level >= DebugLevel {
|
||||||
|
entry.log(DebugLevel, fmt.Sprint(args...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Print(args ...interface{}) {
|
||||||
|
entry.Info(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Info(args ...interface{}) {
|
||||||
|
if entry.Logger.Level >= InfoLevel {
|
||||||
|
entry.log(InfoLevel, fmt.Sprint(args...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Warn(args ...interface{}) {
|
||||||
|
if entry.Logger.Level >= WarnLevel {
|
||||||
|
entry.log(WarnLevel, fmt.Sprint(args...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Warning(args ...interface{}) {
|
||||||
|
entry.Warn(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Error(args ...interface{}) {
|
||||||
|
if entry.Logger.Level >= ErrorLevel {
|
||||||
|
entry.log(ErrorLevel, fmt.Sprint(args...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Fatal(args ...interface{}) {
|
||||||
|
if entry.Logger.Level >= FatalLevel {
|
||||||
|
entry.log(FatalLevel, fmt.Sprint(args...))
|
||||||
|
}
|
||||||
|
Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Panic(args ...interface{}) {
|
||||||
|
if entry.Logger.Level >= PanicLevel {
|
||||||
|
entry.log(PanicLevel, fmt.Sprint(args...))
|
||||||
|
}
|
||||||
|
panic(fmt.Sprint(args...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Entry Printf family functions
|
||||||
|
|
||||||
|
func (entry *Entry) Debugf(format string, args ...interface{}) {
|
||||||
|
if entry.Logger.Level >= DebugLevel {
|
||||||
|
entry.Debug(fmt.Sprintf(format, args...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Infof(format string, args ...interface{}) {
|
||||||
|
if entry.Logger.Level >= InfoLevel {
|
||||||
|
entry.Info(fmt.Sprintf(format, args...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Printf(format string, args ...interface{}) {
|
||||||
|
entry.Infof(format, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Warnf(format string, args ...interface{}) {
|
||||||
|
if entry.Logger.Level >= WarnLevel {
|
||||||
|
entry.Warn(fmt.Sprintf(format, args...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Warningf(format string, args ...interface{}) {
|
||||||
|
entry.Warnf(format, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Errorf(format string, args ...interface{}) {
|
||||||
|
if entry.Logger.Level >= ErrorLevel {
|
||||||
|
entry.Error(fmt.Sprintf(format, args...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Fatalf(format string, args ...interface{}) {
|
||||||
|
if entry.Logger.Level >= FatalLevel {
|
||||||
|
entry.Fatal(fmt.Sprintf(format, args...))
|
||||||
|
}
|
||||||
|
Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Panicf(format string, args ...interface{}) {
|
||||||
|
if entry.Logger.Level >= PanicLevel {
|
||||||
|
entry.Panic(fmt.Sprintf(format, args...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Entry Println family functions
|
||||||
|
|
||||||
|
func (entry *Entry) Debugln(args ...interface{}) {
|
||||||
|
if entry.Logger.Level >= DebugLevel {
|
||||||
|
entry.Debug(entry.sprintlnn(args...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Infoln(args ...interface{}) {
|
||||||
|
if entry.Logger.Level >= InfoLevel {
|
||||||
|
entry.Info(entry.sprintlnn(args...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Println(args ...interface{}) {
|
||||||
|
entry.Infoln(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Warnln(args ...interface{}) {
|
||||||
|
if entry.Logger.Level >= WarnLevel {
|
||||||
|
entry.Warn(entry.sprintlnn(args...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Warningln(args ...interface{}) {
|
||||||
|
entry.Warnln(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Errorln(args ...interface{}) {
|
||||||
|
if entry.Logger.Level >= ErrorLevel {
|
||||||
|
entry.Error(entry.sprintlnn(args...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Fatalln(args ...interface{}) {
|
||||||
|
if entry.Logger.Level >= FatalLevel {
|
||||||
|
entry.Fatal(entry.sprintlnn(args...))
|
||||||
|
}
|
||||||
|
Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Panicln(args ...interface{}) {
|
||||||
|
if entry.Logger.Level >= PanicLevel {
|
||||||
|
entry.Panic(entry.sprintlnn(args...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sprintlnn => Sprint no newline. This is to get the behavior of how
|
||||||
|
// fmt.Sprintln where spaces are always added between operands, regardless of
|
||||||
|
// their type. Instead of vendoring the Sprintln implementation to spare a
|
||||||
|
// string allocation, we do the simplest thing.
|
||||||
|
func (entry *Entry) sprintlnn(args ...interface{}) string {
|
||||||
|
msg := fmt.Sprintln(args...)
|
||||||
|
return msg[:len(msg)-1]
|
||||||
|
}
|
77
vendor/src/github.com/Sirupsen/logrus/entry_test.go
vendored
Normal file
77
vendor/src/github.com/Sirupsen/logrus/entry_test.go
vendored
Normal file
|
@ -0,0 +1,77 @@
|
||||||
|
package logrus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestEntryWithError(t *testing.T) {
|
||||||
|
|
||||||
|
assert := assert.New(t)
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
ErrorKey = "error"
|
||||||
|
}()
|
||||||
|
|
||||||
|
err := fmt.Errorf("kaboom at layer %d", 4711)
|
||||||
|
|
||||||
|
assert.Equal(err, WithError(err).Data["error"])
|
||||||
|
|
||||||
|
logger := New()
|
||||||
|
logger.Out = &bytes.Buffer{}
|
||||||
|
entry := NewEntry(logger)
|
||||||
|
|
||||||
|
assert.Equal(err, entry.WithError(err).Data["error"])
|
||||||
|
|
||||||
|
ErrorKey = "err"
|
||||||
|
|
||||||
|
assert.Equal(err, entry.WithError(err).Data["err"])
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestEntryPanicln(t *testing.T) {
|
||||||
|
errBoom := fmt.Errorf("boom time")
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
p := recover()
|
||||||
|
assert.NotNil(t, p)
|
||||||
|
|
||||||
|
switch pVal := p.(type) {
|
||||||
|
case *Entry:
|
||||||
|
assert.Equal(t, "kaboom", pVal.Message)
|
||||||
|
assert.Equal(t, errBoom, pVal.Data["err"])
|
||||||
|
default:
|
||||||
|
t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
logger := New()
|
||||||
|
logger.Out = &bytes.Buffer{}
|
||||||
|
entry := NewEntry(logger)
|
||||||
|
entry.WithField("err", errBoom).Panicln("kaboom")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestEntryPanicf(t *testing.T) {
|
||||||
|
errBoom := fmt.Errorf("boom again")
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
p := recover()
|
||||||
|
assert.NotNil(t, p)
|
||||||
|
|
||||||
|
switch pVal := p.(type) {
|
||||||
|
case *Entry:
|
||||||
|
assert.Equal(t, "kaboom true", pVal.Message)
|
||||||
|
assert.Equal(t, errBoom, pVal.Data["err"])
|
||||||
|
default:
|
||||||
|
t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
logger := New()
|
||||||
|
logger.Out = &bytes.Buffer{}
|
||||||
|
entry := NewEntry(logger)
|
||||||
|
entry.WithField("err", errBoom).Panicf("kaboom %v", true)
|
||||||
|
}
|
50
vendor/src/github.com/Sirupsen/logrus/examples/basic/basic.go
vendored
Normal file
50
vendor/src/github.com/Sirupsen/logrus/examples/basic/basic.go
vendored
Normal file
|
@ -0,0 +1,50 @@
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/Sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
var log = logrus.New()
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
log.Formatter = new(logrus.JSONFormatter)
|
||||||
|
log.Formatter = new(logrus.TextFormatter) // default
|
||||||
|
log.Level = logrus.DebugLevel
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
defer func() {
|
||||||
|
err := recover()
|
||||||
|
if err != nil {
|
||||||
|
log.WithFields(logrus.Fields{
|
||||||
|
"omg": true,
|
||||||
|
"err": err,
|
||||||
|
"number": 100,
|
||||||
|
}).Fatal("The ice breaks!")
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
log.WithFields(logrus.Fields{
|
||||||
|
"animal": "walrus",
|
||||||
|
"number": 8,
|
||||||
|
}).Debug("Started observing beach")
|
||||||
|
|
||||||
|
log.WithFields(logrus.Fields{
|
||||||
|
"animal": "walrus",
|
||||||
|
"size": 10,
|
||||||
|
}).Info("A group of walrus emerges from the ocean")
|
||||||
|
|
||||||
|
log.WithFields(logrus.Fields{
|
||||||
|
"omg": true,
|
||||||
|
"number": 122,
|
||||||
|
}).Warn("The group's number increased tremendously!")
|
||||||
|
|
||||||
|
log.WithFields(logrus.Fields{
|
||||||
|
"temperature": -4,
|
||||||
|
}).Debug("Temperature changes")
|
||||||
|
|
||||||
|
log.WithFields(logrus.Fields{
|
||||||
|
"animal": "orca",
|
||||||
|
"size": 9009,
|
||||||
|
}).Panic("It's over 9000!")
|
||||||
|
}
|
30
vendor/src/github.com/Sirupsen/logrus/examples/hook/hook.go
vendored
Normal file
30
vendor/src/github.com/Sirupsen/logrus/examples/hook/hook.go
vendored
Normal file
|
@ -0,0 +1,30 @@
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/Sirupsen/logrus"
|
||||||
|
"gopkg.in/gemnasium/logrus-airbrake-hook.v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
var log = logrus.New()
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
log.Formatter = new(logrus.TextFormatter) // default
|
||||||
|
log.Hooks.Add(airbrake.NewHook(123, "xyz", "development"))
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
log.WithFields(logrus.Fields{
|
||||||
|
"animal": "walrus",
|
||||||
|
"size": 10,
|
||||||
|
}).Info("A group of walrus emerges from the ocean")
|
||||||
|
|
||||||
|
log.WithFields(logrus.Fields{
|
||||||
|
"omg": true,
|
||||||
|
"number": 122,
|
||||||
|
}).Warn("The group's number increased tremendously!")
|
||||||
|
|
||||||
|
log.WithFields(logrus.Fields{
|
||||||
|
"omg": true,
|
||||||
|
"number": 100,
|
||||||
|
}).Fatal("The ice breaks!")
|
||||||
|
}
|

193 vendor/src/github.com/Sirupsen/logrus/exported.go vendored Normal file
@@ -0,0 +1,193 @@
package logrus

import (
	"io"
)

var (
	// std is the name of the standard logger in stdlib `log`
	std = New()
)

func StandardLogger() *Logger {
	return std
}

// SetOutput sets the standard logger output.
func SetOutput(out io.Writer) {
	std.mu.Lock()
	defer std.mu.Unlock()
	std.Out = out
}

// SetFormatter sets the standard logger formatter.
func SetFormatter(formatter Formatter) {
	std.mu.Lock()
	defer std.mu.Unlock()
	std.Formatter = formatter
}

// SetLevel sets the standard logger level.
func SetLevel(level Level) {
	std.mu.Lock()
	defer std.mu.Unlock()
	std.Level = level
}

// GetLevel returns the standard logger level.
func GetLevel() Level {
	std.mu.Lock()
	defer std.mu.Unlock()
	return std.Level
}

// AddHook adds a hook to the standard logger hooks.
func AddHook(hook Hook) {
	std.mu.Lock()
	defer std.mu.Unlock()
	std.Hooks.Add(hook)
}

// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
func WithError(err error) *Entry {
	return std.WithField(ErrorKey, err)
}

// WithField creates an entry from the standard logger and adds a field to
// it. If you want multiple fields, use `WithFields`.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
// or Panic on the Entry it returns.
func WithField(key string, value interface{}) *Entry {
	return std.WithField(key, value)
}

// WithFields creates an entry from the standard logger and adds multiple
// fields to it. This is simply a helper for `WithField`, invoking it
// once for each field.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
// or Panic on the Entry it returns.
func WithFields(fields Fields) *Entry {
	return std.WithFields(fields)
}

// Debug logs a message at level Debug on the standard logger.
func Debug(args ...interface{}) {
	std.Debug(args...)
}

// Print logs a message at level Info on the standard logger.
func Print(args ...interface{}) {
	std.Print(args...)
}

// Info logs a message at level Info on the standard logger.
func Info(args ...interface{}) {
	std.Info(args...)
}

// Warn logs a message at level Warn on the standard logger.
func Warn(args ...interface{}) {
	std.Warn(args...)
}

// Warning logs a message at level Warn on the standard logger.
func Warning(args ...interface{}) {
	std.Warning(args...)
}

// Error logs a message at level Error on the standard logger.
func Error(args ...interface{}) {
	std.Error(args...)
}

// Panic logs a message at level Panic on the standard logger.
func Panic(args ...interface{}) {
	std.Panic(args...)
}

// Fatal logs a message at level Fatal on the standard logger.
func Fatal(args ...interface{}) {
	std.Fatal(args...)
}

// Debugf logs a message at level Debug on the standard logger.
func Debugf(format string, args ...interface{}) {
	std.Debugf(format, args...)
}

// Printf logs a message at level Info on the standard logger.
func Printf(format string, args ...interface{}) {
	std.Printf(format, args...)
}

// Infof logs a message at level Info on the standard logger.
func Infof(format string, args ...interface{}) {
	std.Infof(format, args...)
}

// Warnf logs a message at level Warn on the standard logger.
func Warnf(format string, args ...interface{}) {
	std.Warnf(format, args...)
}

// Warningf logs a message at level Warn on the standard logger.
func Warningf(format string, args ...interface{}) {
	std.Warningf(format, args...)
}

// Errorf logs a message at level Error on the standard logger.
func Errorf(format string, args ...interface{}) {
	std.Errorf(format, args...)
}

// Panicf logs a message at level Panic on the standard logger.
func Panicf(format string, args ...interface{}) {
	std.Panicf(format, args...)
}

// Fatalf logs a message at level Fatal on the standard logger.
func Fatalf(format string, args ...interface{}) {
	std.Fatalf(format, args...)
}

// Debugln logs a message at level Debug on the standard logger.
func Debugln(args ...interface{}) {
	std.Debugln(args...)
}

// Println logs a message at level Info on the standard logger.
func Println(args ...interface{}) {
	std.Println(args...)
}

// Infoln logs a message at level Info on the standard logger.
func Infoln(args ...interface{}) {
	std.Infoln(args...)
}

// Warnln logs a message at level Warn on the standard logger.
func Warnln(args ...interface{}) {
	std.Warnln(args...)
}

// Warningln logs a message at level Warn on the standard logger.
func Warningln(args ...interface{}) {
	std.Warningln(args...)
}

// Errorln logs a message at level Error on the standard logger.
func Errorln(args ...interface{}) {
	std.Errorln(args...)
}

// Panicln logs a message at level Panic on the standard logger.
func Panicln(args ...interface{}) {
	std.Panicln(args...)
}

// Fatalln logs a message at level Fatal on the standard logger.
func Fatalln(args ...interface{}) {
	std.Fatalln(args...)
}
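
The package-level helpers in exported.go all delegate to the shared `std` logger. A minimal usage sketch, assuming only the API shown in this file plus the `JSONFormatter` vendored further below (the "component" field name is an arbitrary example, not part of the vendored code):

```go
package main

import (
	"os"

	"github.com/Sirupsen/logrus"
)

func main() {
	// Configure the shared standard logger that exported.go wraps.
	logrus.SetOutput(os.Stdout)
	logrus.SetFormatter(&logrus.JSONFormatter{})
	logrus.SetLevel(logrus.WarnLevel)

	// Suppressed: Info is below the Warn threshold set above.
	logrus.Info("not emitted")

	// Emitted as a single JSON line on stdout.
	logrus.WithFields(logrus.Fields{"component": "example"}).Warn("emitted")
}
```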

45 vendor/src/github.com/Sirupsen/logrus/formatter.go vendored Normal file
@@ -0,0 +1,45 @@
package logrus

import "time"

const DefaultTimestampFormat = time.RFC3339

// The Formatter interface is used to implement a custom Formatter. It takes an
// `Entry`. It exposes all the fields, including the default ones:
//
// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
// * `entry.Data["time"]`. The timestamp.
// * `entry.Data["level"]. The level the entry was logged at.
//
// Any additional fields added with `WithField` or `WithFields` are also in
// `entry.Data`. Format is expected to return an array of bytes which are then
// logged to `logger.Out`.
type Formatter interface {
	Format(*Entry) ([]byte, error)
}

// This is to not silently overwrite `time`, `msg` and `level` fields when
// dumping it. If this code wasn't there doing:
//
//  logrus.WithField("level", 1).Info("hello")
//
// Would just silently drop the user provided level. Instead with this code
// it'll logged as:
//
//  {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
//
// It's not exported because it's still using Data in an opinionated way. It's to
// avoid code duplication between the two default formatters.
func prefixFieldClashes(data Fields) {
	if t, ok := data["time"]; ok {
		data["fields.time"] = t
	}

	if m, ok := data["msg"]; ok {
		data["fields.msg"] = m
	}

	if l, ok := data["level"]; ok {
		data["fields.level"] = l
	}
}
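
To illustrate the `Formatter` contract above, here is a caller-side sketch; the `plainFormatter` type is hypothetical and not part of the vendored code:

```go
package main

import (
	"fmt"

	"github.com/Sirupsen/logrus"
)

// plainFormatter ignores entry fields and renders "<level> <message>\n".
type plainFormatter struct{}

// Format satisfies the logrus.Formatter interface defined above.
func (f *plainFormatter) Format(e *logrus.Entry) ([]byte, error) {
	return []byte(fmt.Sprintf("%s %s\n", e.Level.String(), e.Message)), nil
}

func main() {
	log := logrus.New()
	log.Formatter = &plainFormatter{}
	log.Info("plain formatter in use")
}
```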

98 vendor/src/github.com/Sirupsen/logrus/formatter_bench_test.go vendored Normal file
@@ -0,0 +1,98 @@
|
||||||
|
package logrus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// smallFields is a small size data set for benchmarking
|
||||||
|
var smallFields = Fields{
|
||||||
|
"foo": "bar",
|
||||||
|
"baz": "qux",
|
||||||
|
"one": "two",
|
||||||
|
"three": "four",
|
||||||
|
}
|
||||||
|
|
||||||
|
// largeFields is a large size data set for benchmarking
|
||||||
|
var largeFields = Fields{
|
||||||
|
"foo": "bar",
|
||||||
|
"baz": "qux",
|
||||||
|
"one": "two",
|
||||||
|
"three": "four",
|
||||||
|
"five": "six",
|
||||||
|
"seven": "eight",
|
||||||
|
"nine": "ten",
|
||||||
|
"eleven": "twelve",
|
||||||
|
"thirteen": "fourteen",
|
||||||
|
"fifteen": "sixteen",
|
||||||
|
"seventeen": "eighteen",
|
||||||
|
"nineteen": "twenty",
|
||||||
|
"a": "b",
|
||||||
|
"c": "d",
|
||||||
|
"e": "f",
|
||||||
|
"g": "h",
|
||||||
|
"i": "j",
|
||||||
|
"k": "l",
|
||||||
|
"m": "n",
|
||||||
|
"o": "p",
|
||||||
|
"q": "r",
|
||||||
|
"s": "t",
|
||||||
|
"u": "v",
|
||||||
|
"w": "x",
|
||||||
|
"y": "z",
|
||||||
|
"this": "will",
|
||||||
|
"make": "thirty",
|
||||||
|
"entries": "yeah",
|
||||||
|
}
|
||||||
|
|
||||||
|
var errorFields = Fields{
|
||||||
|
"foo": fmt.Errorf("bar"),
|
||||||
|
"baz": fmt.Errorf("qux"),
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkErrorTextFormatter(b *testing.B) {
|
||||||
|
doBenchmark(b, &TextFormatter{DisableColors: true}, errorFields)
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkSmallTextFormatter(b *testing.B) {
|
||||||
|
doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields)
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkLargeTextFormatter(b *testing.B) {
|
||||||
|
doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields)
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkSmallColoredTextFormatter(b *testing.B) {
|
||||||
|
doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields)
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkLargeColoredTextFormatter(b *testing.B) {
|
||||||
|
doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields)
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkSmallJSONFormatter(b *testing.B) {
|
||||||
|
doBenchmark(b, &JSONFormatter{}, smallFields)
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkLargeJSONFormatter(b *testing.B) {
|
||||||
|
doBenchmark(b, &JSONFormatter{}, largeFields)
|
||||||
|
}
|
||||||
|
|
||||||
|
func doBenchmark(b *testing.B, formatter Formatter, fields Fields) {
|
||||||
|
entry := &Entry{
|
||||||
|
Time: time.Time{},
|
||||||
|
Level: InfoLevel,
|
||||||
|
Message: "message",
|
||||||
|
Data: fields,
|
||||||
|
}
|
||||||
|
var d []byte
|
||||||
|
var err error
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
d, err = formatter.Format(entry)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
b.SetBytes(int64(len(d)))
|
||||||
|
}
|
||||||
|
}
|

122 vendor/src/github.com/Sirupsen/logrus/hook_test.go vendored Normal file
@@ -0,0 +1,122 @@
|
||||||
|
package logrus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
type TestHook struct {
|
||||||
|
Fired bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (hook *TestHook) Fire(entry *Entry) error {
|
||||||
|
hook.Fired = true
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (hook *TestHook) Levels() []Level {
|
||||||
|
return []Level{
|
||||||
|
DebugLevel,
|
||||||
|
InfoLevel,
|
||||||
|
WarnLevel,
|
||||||
|
ErrorLevel,
|
||||||
|
FatalLevel,
|
||||||
|
PanicLevel,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHookFires(t *testing.T) {
|
||||||
|
hook := new(TestHook)
|
||||||
|
|
||||||
|
LogAndAssertJSON(t, func(log *Logger) {
|
||||||
|
log.Hooks.Add(hook)
|
||||||
|
assert.Equal(t, hook.Fired, false)
|
||||||
|
|
||||||
|
log.Print("test")
|
||||||
|
}, func(fields Fields) {
|
||||||
|
assert.Equal(t, hook.Fired, true)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
type ModifyHook struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (hook *ModifyHook) Fire(entry *Entry) error {
|
||||||
|
entry.Data["wow"] = "whale"
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (hook *ModifyHook) Levels() []Level {
|
||||||
|
return []Level{
|
||||||
|
DebugLevel,
|
||||||
|
InfoLevel,
|
||||||
|
WarnLevel,
|
||||||
|
ErrorLevel,
|
||||||
|
FatalLevel,
|
||||||
|
PanicLevel,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHookCanModifyEntry(t *testing.T) {
|
||||||
|
hook := new(ModifyHook)
|
||||||
|
|
||||||
|
LogAndAssertJSON(t, func(log *Logger) {
|
||||||
|
log.Hooks.Add(hook)
|
||||||
|
log.WithField("wow", "elephant").Print("test")
|
||||||
|
}, func(fields Fields) {
|
||||||
|
assert.Equal(t, fields["wow"], "whale")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCanFireMultipleHooks(t *testing.T) {
|
||||||
|
hook1 := new(ModifyHook)
|
||||||
|
hook2 := new(TestHook)
|
||||||
|
|
||||||
|
LogAndAssertJSON(t, func(log *Logger) {
|
||||||
|
log.Hooks.Add(hook1)
|
||||||
|
log.Hooks.Add(hook2)
|
||||||
|
|
||||||
|
log.WithField("wow", "elephant").Print("test")
|
||||||
|
}, func(fields Fields) {
|
||||||
|
assert.Equal(t, fields["wow"], "whale")
|
||||||
|
assert.Equal(t, hook2.Fired, true)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
type ErrorHook struct {
|
||||||
|
Fired bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (hook *ErrorHook) Fire(entry *Entry) error {
|
||||||
|
hook.Fired = true
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (hook *ErrorHook) Levels() []Level {
|
||||||
|
return []Level{
|
||||||
|
ErrorLevel,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestErrorHookShouldntFireOnInfo(t *testing.T) {
|
||||||
|
hook := new(ErrorHook)
|
||||||
|
|
||||||
|
LogAndAssertJSON(t, func(log *Logger) {
|
||||||
|
log.Hooks.Add(hook)
|
||||||
|
log.Info("test")
|
||||||
|
}, func(fields Fields) {
|
||||||
|
assert.Equal(t, hook.Fired, false)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestErrorHookShouldFireOnError(t *testing.T) {
|
||||||
|
hook := new(ErrorHook)
|
||||||
|
|
||||||
|
LogAndAssertJSON(t, func(log *Logger) {
|
||||||
|
log.Hooks.Add(hook)
|
||||||
|
log.Error("test")
|
||||||
|
}, func(fields Fields) {
|
||||||
|
assert.Equal(t, hook.Fired, true)
|
||||||
|
})
|
||||||
|
}
|

34 vendor/src/github.com/Sirupsen/logrus/hooks.go vendored Normal file
@@ -0,0 +1,34 @@
package logrus

// A hook to be fired when logging on the logging levels returned from
// `Levels()` on your implementation of the interface. Note that this is not
// fired in a goroutine or a channel with workers, you should handle such
// functionality yourself if your call is non-blocking and you don't wish for
// the logging calls for levels returned from `Levels()` to block.
type Hook interface {
	Levels() []Level
	Fire(*Entry) error
}

// Internal type for storing the hooks on a logger instance.
type LevelHooks map[Level][]Hook

// Add a hook to an instance of logger. This is called with
// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
func (hooks LevelHooks) Add(hook Hook) {
	for _, level := range hook.Levels() {
		hooks[level] = append(hooks[level], hook)
	}
}

// Fire all the hooks for the passed level. Used by `entry.log` to fire
// appropriate hooks for a log entry.
func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
	for _, hook := range hooks[level] {
		if err := hook.Fire(entry); err != nil {
			return err
		}
	}

	return nil
}
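
A caller-side sketch of the `Hook` interface defined above; the `countingHook` type is a hypothetical example, not part of the vendored code:

```go
package main

import "github.com/Sirupsen/logrus"

// countingHook counts how many entries have fired, at any level.
type countingHook struct {
	count int
}

// Levels registers the hook for every level (AllLevels is vendored in logrus.go).
func (h *countingHook) Levels() []logrus.Level {
	return logrus.AllLevels
}

// Fire runs synchronously for each matching entry, as noted in the comment above.
func (h *countingHook) Fire(e *logrus.Entry) error {
	h.count++
	return nil
}

func main() {
	log := logrus.New()
	log.Hooks.Add(&countingHook{})
	log.Info("one")
	log.Warn("two")
}
```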

39 vendor/src/github.com/Sirupsen/logrus/hooks/syslog/README.md vendored Normal file
@@ -0,0 +1,39 @@
# Syslog Hooks for Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>

## Usage

```go
import (
  "log/syslog"
  "github.com/Sirupsen/logrus"
  logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
)

func main() {
  log := logrus.New()
  hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")

  if err == nil {
    log.Hooks.Add(hook)
  }
}
```

If you want to connect to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). Just assign empty string to the first two parameters of `NewSyslogHook`. It should look like the following.

```go
import (
  "log/syslog"
  "github.com/Sirupsen/logrus"
  logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
)

func main() {
  log := logrus.New()
  hook, err := logrus_syslog.NewSyslogHook("", "", syslog.LOG_INFO, "")

  if err == nil {
    log.Hooks.Add(hook)
  }
}
```

54 vendor/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go vendored Normal file
@@ -0,0 +1,54 @@
// +build !windows,!nacl,!plan9

package logrus_syslog

import (
	"fmt"
	"github.com/Sirupsen/logrus"
	"log/syslog"
	"os"
)

// SyslogHook to send logs via syslog.
type SyslogHook struct {
	Writer        *syslog.Writer
	SyslogNetwork string
	SyslogRaddr   string
}

// Creates a hook to be added to an instance of logger. This is called with
// `hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_DEBUG, "")`
// `if err == nil { log.Hooks.Add(hook) }`
func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string) (*SyslogHook, error) {
	w, err := syslog.Dial(network, raddr, priority, tag)
	return &SyslogHook{w, network, raddr}, err
}

func (hook *SyslogHook) Fire(entry *logrus.Entry) error {
	line, err := entry.String()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err)
		return err
	}

	switch entry.Level {
	case logrus.PanicLevel:
		return hook.Writer.Crit(line)
	case logrus.FatalLevel:
		return hook.Writer.Crit(line)
	case logrus.ErrorLevel:
		return hook.Writer.Err(line)
	case logrus.WarnLevel:
		return hook.Writer.Warning(line)
	case logrus.InfoLevel:
		return hook.Writer.Info(line)
	case logrus.DebugLevel:
		return hook.Writer.Debug(line)
	default:
		return nil
	}
}

func (hook *SyslogHook) Levels() []logrus.Level {
	return logrus.AllLevels
}

26 vendor/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go vendored Normal file
@@ -0,0 +1,26 @@
package logrus_syslog

import (
	"github.com/Sirupsen/logrus"
	"log/syslog"
	"testing"
)

func TestLocalhostAddAndPrint(t *testing.T) {
	log := logrus.New()
	hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")

	if err != nil {
		t.Errorf("Unable to connect to local syslog.")
	}

	log.Hooks.Add(hook)

	for _, level := range hook.Levels() {
		if len(log.Hooks[level]) != 1 {
			t.Errorf("SyslogHook was not added. The length of log.Hooks[%v]: %v", level, len(log.Hooks[level]))
		}
	}

	log.Info("Congratulations!")
}

67 vendor/src/github.com/Sirupsen/logrus/hooks/test/test.go vendored Normal file
@@ -0,0 +1,67 @@
package test

import (
	"io/ioutil"

	"github.com/Sirupsen/logrus"
)

// test.Hook is a hook designed for dealing with logs in test scenarios.
type Hook struct {
	Entries []*logrus.Entry
}

// Installs a test hook for the global logger.
func NewGlobal() *Hook {

	hook := new(Hook)
	logrus.AddHook(hook)

	return hook

}

// Installs a test hook for a given local logger.
func NewLocal(logger *logrus.Logger) *Hook {

	hook := new(Hook)
	logger.Hooks.Add(hook)

	return hook

}

// Creates a discarding logger and installs the test hook.
func NewNullLogger() (*logrus.Logger, *Hook) {

	logger := logrus.New()
	logger.Out = ioutil.Discard

	return logger, NewLocal(logger)

}

func (t *Hook) Fire(e *logrus.Entry) error {
	t.Entries = append(t.Entries, e)
	return nil
}

func (t *Hook) Levels() []logrus.Level {
	return logrus.AllLevels
}

// LastEntry returns the last entry that was logged or nil.
func (t *Hook) LastEntry() (l *logrus.Entry) {

	if i := len(t.Entries) - 1; i < 0 {
		return nil
	} else {
		return t.Entries[i]
	}

}

// Reset removes all Entries from this test hook.
func (t *Hook) Reset() {
	t.Entries = make([]*logrus.Entry, 0)
}
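
A sketch of how a test might consume the hook above; the package name `mypkg` and the logged message are placeholders, not part of the vendored code:

```go
package mypkg

import (
	"testing"

	"github.com/Sirupsen/logrus"
	"github.com/Sirupsen/logrus/hooks/test"
)

func TestEmitsWarning(t *testing.T) {
	// NewNullLogger returns a logger whose output is discarded plus a
	// hook that records every entry, as defined in hooks/test/test.go.
	logger, hook := test.NewNullLogger()

	logger.Warn("something odd happened")

	last := hook.LastEntry()
	if last == nil || last.Level != logrus.WarnLevel || last.Message != "something odd happened" {
		t.Fatalf("unexpected last entry: %+v", last)
	}
}
```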

39 vendor/src/github.com/Sirupsen/logrus/hooks/test/test_test.go vendored Normal file
@@ -0,0 +1,39 @@
|
||||||
|
package test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/Sirupsen/logrus"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAllHooks(t *testing.T) {
|
||||||
|
|
||||||
|
assert := assert.New(t)
|
||||||
|
|
||||||
|
logger, hook := NewNullLogger()
|
||||||
|
assert.Nil(hook.LastEntry())
|
||||||
|
assert.Equal(0, len(hook.Entries))
|
||||||
|
|
||||||
|
logger.Error("Hello error")
|
||||||
|
assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
|
||||||
|
assert.Equal("Hello error", hook.LastEntry().Message)
|
||||||
|
assert.Equal(1, len(hook.Entries))
|
||||||
|
|
||||||
|
logger.Warn("Hello warning")
|
||||||
|
assert.Equal(logrus.WarnLevel, hook.LastEntry().Level)
|
||||||
|
assert.Equal("Hello warning", hook.LastEntry().Message)
|
||||||
|
assert.Equal(2, len(hook.Entries))
|
||||||
|
|
||||||
|
hook.Reset()
|
||||||
|
assert.Nil(hook.LastEntry())
|
||||||
|
assert.Equal(0, len(hook.Entries))
|
||||||
|
|
||||||
|
hook = NewGlobal()
|
||||||
|
|
||||||
|
logrus.Error("Hello error")
|
||||||
|
assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
|
||||||
|
assert.Equal("Hello error", hook.LastEntry().Message)
|
||||||
|
assert.Equal(1, len(hook.Entries))
|
||||||
|
|
||||||
|
}
|

74 vendor/src/github.com/Sirupsen/logrus/json_formatter.go vendored Normal file
@@ -0,0 +1,74 @@
package logrus

import (
	"encoding/json"
	"fmt"
)

type fieldKey string
type FieldMap map[fieldKey]string

const (
	FieldKeyMsg   = "msg"
	FieldKeyLevel = "level"
	FieldKeyTime  = "time"
)

func (f FieldMap) resolve(key fieldKey) string {
	if k, ok := f[key]; ok {
		return k
	}

	return string(key)
}

type JSONFormatter struct {
	// TimestampFormat sets the format used for marshaling timestamps.
	TimestampFormat string

	// DisableTimestamp allows disabling automatic timestamps in output
	DisableTimestamp bool

	// FieldMap allows users to customize the names of keys for various fields.
	// As an example:
	// formatter := &JSONFormatter{
	//   	FieldMap: FieldMap{
	// 		 FieldKeyTime: "@timestamp",
	// 		 FieldKeyLevel: "@level",
	// 		 FieldKeyLevel: "@message",
	//    },
	// }
	FieldMap FieldMap
}

func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
	data := make(Fields, len(entry.Data)+3)
	for k, v := range entry.Data {
		switch v := v.(type) {
		case error:
			// Otherwise errors are ignored by `encoding/json`
			// https://github.com/Sirupsen/logrus/issues/137
			data[k] = v.Error()
		default:
			data[k] = v
		}
	}
	prefixFieldClashes(data)

	timestampFormat := f.TimestampFormat
	if timestampFormat == "" {
		timestampFormat = DefaultTimestampFormat
	}

	if !f.DisableTimestamp {
		data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
	}
	data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
	data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()

	serialized, err := json.Marshal(data)
	if err != nil {
		return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
	}
	return append(serialized, '\n'), nil
}
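
A short sketch of the `FieldMap` customization described in the struct comment above; the replacement key names (`@timestamp`, `@level`, `@message`) are arbitrary choices, not defaults:

```go
package main

import "github.com/Sirupsen/logrus"

func main() {
	log := logrus.New()
	log.Formatter = &logrus.JSONFormatter{
		// Rename the default "time", "level" and "msg" JSON keys.
		FieldMap: logrus.FieldMap{
			logrus.FieldKeyTime:  "@timestamp",
			logrus.FieldKeyLevel: "@level",
			logrus.FieldKeyMsg:   "@message",
		},
	}
	log.WithFields(logrus.Fields{"animal": "walrus"}).Info("hello")
}
```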

199 vendor/src/github.com/Sirupsen/logrus/json_formatter_test.go vendored Normal file
@@ -0,0 +1,199 @@
|
||||||
|
package logrus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestErrorNotLost(t *testing.T) {
|
||||||
|
formatter := &JSONFormatter{}
|
||||||
|
|
||||||
|
b, err := formatter.Format(WithField("error", errors.New("wild walrus")))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal("Unable to format entry: ", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
entry := make(map[string]interface{})
|
||||||
|
err = json.Unmarshal(b, &entry)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal("Unable to unmarshal formatted entry: ", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if entry["error"] != "wild walrus" {
|
||||||
|
t.Fatal("Error field not set")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestErrorNotLostOnFieldNotNamedError(t *testing.T) {
|
||||||
|
formatter := &JSONFormatter{}
|
||||||
|
|
||||||
|
b, err := formatter.Format(WithField("omg", errors.New("wild walrus")))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal("Unable to format entry: ", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
entry := make(map[string]interface{})
|
||||||
|
err = json.Unmarshal(b, &entry)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal("Unable to unmarshal formatted entry: ", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if entry["omg"] != "wild walrus" {
|
||||||
|
t.Fatal("Error field not set")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFieldClashWithTime(t *testing.T) {
|
||||||
|
formatter := &JSONFormatter{}
|
||||||
|
|
||||||
|
b, err := formatter.Format(WithField("time", "right now!"))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal("Unable to format entry: ", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
entry := make(map[string]interface{})
|
||||||
|
err = json.Unmarshal(b, &entry)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal("Unable to unmarshal formatted entry: ", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if entry["fields.time"] != "right now!" {
|
||||||
|
t.Fatal("fields.time not set to original time field")
|
||||||
|
}
|
||||||
|
|
||||||
|
if entry["time"] != "0001-01-01T00:00:00Z" {
|
||||||
|
t.Fatal("time field not set to current time, was: ", entry["time"])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFieldClashWithMsg(t *testing.T) {
|
||||||
|
formatter := &JSONFormatter{}
|
||||||
|
|
||||||
|
b, err := formatter.Format(WithField("msg", "something"))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal("Unable to format entry: ", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
entry := make(map[string]interface{})
|
||||||
|
err = json.Unmarshal(b, &entry)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal("Unable to unmarshal formatted entry: ", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if entry["fields.msg"] != "something" {
|
||||||
|
t.Fatal("fields.msg not set to original msg field")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFieldClashWithLevel(t *testing.T) {
|
||||||
|
formatter := &JSONFormatter{}
|
||||||
|
|
||||||
|
b, err := formatter.Format(WithField("level", "something"))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal("Unable to format entry: ", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
entry := make(map[string]interface{})
|
||||||
|
err = json.Unmarshal(b, &entry)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal("Unable to unmarshal formatted entry: ", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if entry["fields.level"] != "something" {
|
||||||
|
t.Fatal("fields.level not set to original level field")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestJSONEntryEndsWithNewline(t *testing.T) {
|
||||||
|
formatter := &JSONFormatter{}
|
||||||
|
|
||||||
|
b, err := formatter.Format(WithField("level", "something"))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal("Unable to format entry: ", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if b[len(b)-1] != '\n' {
|
||||||
|
t.Fatal("Expected JSON log entry to end with a newline")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestJSONMessageKey(t *testing.T) {
|
||||||
|
formatter := &JSONFormatter{
|
||||||
|
FieldMap: FieldMap{
|
||||||
|
FieldKeyMsg: "message",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
b, err := formatter.Format(&Entry{Message: "oh hai"})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal("Unable to format entry: ", err)
|
||||||
|
}
|
||||||
|
s := string(b)
|
||||||
|
if !(strings.Contains(s, "message") && strings.Contains(s, "oh hai")) {
|
||||||
|
t.Fatal("Expected JSON to format message key")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestJSONLevelKey(t *testing.T) {
|
||||||
|
formatter := &JSONFormatter{
|
||||||
|
FieldMap: FieldMap{
|
||||||
|
FieldKeyLevel: "somelevel",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
b, err := formatter.Format(WithField("level", "something"))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal("Unable to format entry: ", err)
|
||||||
|
}
|
||||||
|
s := string(b)
|
||||||
|
if !strings.Contains(s, "somelevel") {
|
||||||
|
t.Fatal("Expected JSON to format level key")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestJSONTimeKey(t *testing.T) {
|
||||||
|
formatter := &JSONFormatter{
|
||||||
|
FieldMap: FieldMap{
|
||||||
|
FieldKeyTime: "timeywimey",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
b, err := formatter.Format(WithField("level", "something"))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal("Unable to format entry: ", err)
|
||||||
|
}
|
||||||
|
s := string(b)
|
||||||
|
if !strings.Contains(s, "timeywimey") {
|
||||||
|
t.Fatal("Expected JSON to format time key")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestJSONDisableTimestamp(t *testing.T) {
|
||||||
|
formatter := &JSONFormatter{
|
||||||
|
DisableTimestamp: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
b, err := formatter.Format(WithField("level", "something"))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal("Unable to format entry: ", err)
|
||||||
|
}
|
||||||
|
s := string(b)
|
||||||
|
if strings.Contains(s, FieldKeyTime) {
|
||||||
|
t.Error("Did not prevent timestamp", s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestJSONEnableTimestamp(t *testing.T) {
|
||||||
|
formatter := &JSONFormatter{}
|
||||||
|
|
||||||
|
b, err := formatter.Format(WithField("level", "something"))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal("Unable to format entry: ", err)
|
||||||
|
}
|
||||||
|
s := string(b)
|
||||||
|
if !strings.Contains(s, FieldKeyTime) {
|
||||||
|
t.Error("Timestamp not present", s)
|
||||||
|
}
|
||||||
|
}
|

308 vendor/src/github.com/Sirupsen/logrus/logger.go vendored Normal file
@@ -0,0 +1,308 @@
|
||||||
|
package logrus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Logger struct {
|
||||||
|
// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
|
||||||
|
// file, or leave it default which is `os.Stderr`. You can also set this to
|
||||||
|
// something more adventorous, such as logging to Kafka.
|
||||||
|
Out io.Writer
|
||||||
|
// Hooks for the logger instance. These allow firing events based on logging
|
||||||
|
// levels and log entries. For example, to send errors to an error tracking
|
||||||
|
// service, log to StatsD or dump the core on fatal errors.
|
||||||
|
Hooks LevelHooks
|
||||||
|
// All log entries pass through the formatter before logged to Out. The
|
||||||
|
// included formatters are `TextFormatter` and `JSONFormatter` for which
|
||||||
|
// TextFormatter is the default. In development (when a TTY is attached) it
|
||||||
|
// logs with colors, but to a file it wouldn't. You can easily implement your
|
||||||
|
// own that implements the `Formatter` interface, see the `README` or included
|
||||||
|
// formatters for examples.
|
||||||
|
Formatter Formatter
|
||||||
|
// The logging level the logger should log at. This is typically (and defaults
|
||||||
|
// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
|
||||||
|
// logged. `logrus.Debug` is useful in
|
||||||
|
Level Level
|
||||||
|
// Used to sync writing to the log. Locking is enabled by Default
|
||||||
|
mu MutexWrap
|
||||||
|
// Reusable empty entry
|
||||||
|
entryPool sync.Pool
|
||||||
|
}
|
||||||
|
|
||||||
|
type MutexWrap struct {
|
||||||
|
lock sync.Mutex
|
||||||
|
disabled bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mw *MutexWrap) Lock() {
|
||||||
|
if !mw.disabled {
|
||||||
|
mw.lock.Lock()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mw *MutexWrap) Unlock() {
|
||||||
|
if !mw.disabled {
|
||||||
|
mw.lock.Unlock()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mw *MutexWrap) Disable() {
|
||||||
|
mw.disabled = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Creates a new logger. Configuration should be set by changing `Formatter`,
|
||||||
|
// `Out` and `Hooks` directly on the default logger instance. You can also just
|
||||||
|
// instantiate your own:
|
||||||
|
//
|
||||||
|
// var log = &Logger{
|
||||||
|
// Out: os.Stderr,
|
||||||
|
// Formatter: new(JSONFormatter),
|
||||||
|
// Hooks: make(LevelHooks),
|
||||||
|
// Level: logrus.DebugLevel,
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// It's recommended to make this a global instance called `log`.
|
||||||
|
func New() *Logger {
|
||||||
|
return &Logger{
|
||||||
|
Out: os.Stderr,
|
||||||
|
Formatter: new(TextFormatter),
|
||||||
|
Hooks: make(LevelHooks),
|
||||||
|
Level: InfoLevel,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) newEntry() *Entry {
|
||||||
|
entry, ok := logger.entryPool.Get().(*Entry)
|
||||||
|
if ok {
|
||||||
|
return entry
|
||||||
|
}
|
||||||
|
return NewEntry(logger)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) releaseEntry(entry *Entry) {
|
||||||
|
logger.entryPool.Put(entry)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Adds a field to the log entry, note that it doesn't log until you call
|
||||||
|
// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
|
||||||
|
// If you want multiple fields, use `WithFields`.
|
||||||
|
func (logger *Logger) WithField(key string, value interface{}) *Entry {
|
||||||
|
entry := logger.newEntry()
|
||||||
|
defer logger.releaseEntry(entry)
|
||||||
|
return entry.WithField(key, value)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Adds a struct of fields to the log entry. All it does is call `WithField` for
|
||||||
|
// each `Field`.
|
||||||
|
func (logger *Logger) WithFields(fields Fields) *Entry {
|
||||||
|
entry := logger.newEntry()
|
||||||
|
defer logger.releaseEntry(entry)
|
||||||
|
return entry.WithFields(fields)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add an error as single field to the log entry. All it does is call
|
||||||
|
// `WithError` for the given `error`.
|
||||||
|
func (logger *Logger) WithError(err error) *Entry {
|
||||||
|
entry := logger.newEntry()
|
||||||
|
defer logger.releaseEntry(entry)
|
||||||
|
return entry.WithError(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Debugf(format string, args ...interface{}) {
|
||||||
|
if logger.Level >= DebugLevel {
|
||||||
|
entry := logger.newEntry()
|
||||||
|
entry.Debugf(format, args...)
|
||||||
|
logger.releaseEntry(entry)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Infof(format string, args ...interface{}) {
|
||||||
|
if logger.Level >= InfoLevel {
|
||||||
|
entry := logger.newEntry()
|
||||||
|
entry.Infof(format, args...)
|
||||||
|
logger.releaseEntry(entry)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Printf(format string, args ...interface{}) {
|
||||||
|
entry := logger.newEntry()
|
||||||
|
entry.Printf(format, args...)
|
||||||
|
logger.releaseEntry(entry)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Warnf(format string, args ...interface{}) {
|
||||||
|
if logger.Level >= WarnLevel {
|
||||||
|
entry := logger.newEntry()
|
||||||
|
entry.Warnf(format, args...)
|
||||||
|
logger.releaseEntry(entry)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Warningf(format string, args ...interface{}) {
|
||||||
|
if logger.Level >= WarnLevel {
|
||||||
|
entry := logger.newEntry()
|
||||||
|
entry.Warnf(format, args...)
|
||||||
|
logger.releaseEntry(entry)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Errorf(format string, args ...interface{}) {
|
||||||
|
if logger.Level >= ErrorLevel {
|
||||||
|
entry := logger.newEntry()
|
||||||
|
entry.Errorf(format, args...)
|
||||||
|
logger.releaseEntry(entry)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Fatalf(format string, args ...interface{}) {
|
||||||
|
if logger.Level >= FatalLevel {
|
||||||
|
entry := logger.newEntry()
|
||||||
|
entry.Fatalf(format, args...)
|
||||||
|
logger.releaseEntry(entry)
|
||||||
|
}
|
||||||
|
Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Panicf(format string, args ...interface{}) {
|
||||||
|
if logger.Level >= PanicLevel {
|
||||||
|
entry := logger.newEntry()
|
||||||
|
entry.Panicf(format, args...)
|
||||||
|
logger.releaseEntry(entry)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Debug(args ...interface{}) {
|
||||||
|
if logger.Level >= DebugLevel {
|
||||||
|
entry := logger.newEntry()
|
||||||
|
entry.Debug(args...)
|
||||||
|
logger.releaseEntry(entry)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Info(args ...interface{}) {
|
||||||
|
if logger.Level >= InfoLevel {
|
||||||
|
entry := logger.newEntry()
|
||||||
|
entry.Info(args...)
|
||||||
|
logger.releaseEntry(entry)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Print(args ...interface{}) {
|
||||||
|
entry := logger.newEntry()
|
||||||
|
entry.Info(args...)
|
||||||
|
logger.releaseEntry(entry)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Warn(args ...interface{}) {
|
||||||
|
if logger.Level >= WarnLevel {
|
||||||
|
entry := logger.newEntry()
|
||||||
|
entry.Warn(args...)
|
||||||
|
logger.releaseEntry(entry)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Warning(args ...interface{}) {
|
||||||
|
if logger.Level >= WarnLevel {
|
||||||
|
entry := logger.newEntry()
|
||||||
|
entry.Warn(args...)
|
||||||
|
logger.releaseEntry(entry)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Error(args ...interface{}) {
|
||||||
|
if logger.Level >= ErrorLevel {
|
||||||
|
entry := logger.newEntry()
|
||||||
|
entry.Error(args...)
|
||||||
|
logger.releaseEntry(entry)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Fatal(args ...interface{}) {
|
||||||
|
if logger.Level >= FatalLevel {
|
||||||
|
entry := logger.newEntry()
|
||||||
|
entry.Fatal(args...)
|
||||||
|
logger.releaseEntry(entry)
|
||||||
|
}
|
||||||
|
Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Panic(args ...interface{}) {
|
||||||
|
if logger.Level >= PanicLevel {
|
||||||
|
entry := logger.newEntry()
|
||||||
|
entry.Panic(args...)
|
||||||
|
logger.releaseEntry(entry)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Debugln(args ...interface{}) {
|
||||||
|
if logger.Level >= DebugLevel {
|
||||||
|
entry := logger.newEntry()
|
||||||
|
entry.Debugln(args...)
|
||||||
|
logger.releaseEntry(entry)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Infoln(args ...interface{}) {
|
||||||
|
if logger.Level >= InfoLevel {
|
||||||
|
entry := logger.newEntry()
|
||||||
|
entry.Infoln(args...)
|
||||||
|
logger.releaseEntry(entry)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Println(args ...interface{}) {
|
||||||
|
entry := logger.newEntry()
|
||||||
|
entry.Println(args...)
|
||||||
|
logger.releaseEntry(entry)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Warnln(args ...interface{}) {
|
||||||
|
if logger.Level >= WarnLevel {
|
||||||
|
entry := logger.newEntry()
|
||||||
|
entry.Warnln(args...)
|
||||||
|
logger.releaseEntry(entry)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Warningln(args ...interface{}) {
|
||||||
|
if logger.Level >= WarnLevel {
|
||||||
|
entry := logger.newEntry()
|
||||||
|
entry.Warnln(args...)
|
||||||
|
logger.releaseEntry(entry)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Errorln(args ...interface{}) {
|
||||||
|
if logger.Level >= ErrorLevel {
|
||||||
|
entry := logger.newEntry()
|
||||||
|
entry.Errorln(args...)
|
||||||
|
logger.releaseEntry(entry)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Fatalln(args ...interface{}) {
|
||||||
|
if logger.Level >= FatalLevel {
|
||||||
|
entry := logger.newEntry()
|
||||||
|
entry.Fatalln(args...)
|
||||||
|
logger.releaseEntry(entry)
|
||||||
|
}
|
||||||
|
Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Panicln(args ...interface{}) {
|
||||||
|
if logger.Level >= PanicLevel {
|
||||||
|
entry := logger.newEntry()
|
||||||
|
entry.Panicln(args...)
|
||||||
|
logger.releaseEntry(entry)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
//When file is opened with appending mode, it's safe to
|
||||||
|
//write concurrently to a file (within 4k message on Linux).
|
||||||
|
//In these cases user can choose to disable the lock.
|
||||||
|
func (logger *Logger) SetNoLock() {
|
||||||
|
logger.mu.Disable()
|
||||||
|
}
|

61 vendor/src/github.com/Sirupsen/logrus/logger_bench_test.go vendored Normal file
@@ -0,0 +1,61 @@
|
||||||
|
package logrus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
// smallFields is a small size data set for benchmarking
|
||||||
|
var loggerFields = Fields{
|
||||||
|
"foo": "bar",
|
||||||
|
"baz": "qux",
|
||||||
|
"one": "two",
|
||||||
|
"three": "four",
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkDummyLogger(b *testing.B) {
|
||||||
|
nullf, err := os.OpenFile("/dev/null", os.O_WRONLY, 0666)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatalf("%v", err)
|
||||||
|
}
|
||||||
|
defer nullf.Close()
|
||||||
|
doLoggerBenchmark(b, nullf, &TextFormatter{DisableColors: true}, smallFields)
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkDummyLoggerNoLock(b *testing.B) {
|
||||||
|
nullf, err := os.OpenFile("/dev/null", os.O_WRONLY|os.O_APPEND, 0666)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatalf("%v", err)
|
||||||
|
}
|
||||||
|
defer nullf.Close()
|
||||||
|
doLoggerBenchmarkNoLock(b, nullf, &TextFormatter{DisableColors: true}, smallFields)
|
||||||
|
}
|
||||||
|
|
||||||
|
func doLoggerBenchmark(b *testing.B, out *os.File, formatter Formatter, fields Fields) {
|
||||||
|
logger := Logger{
|
||||||
|
Out: out,
|
||||||
|
Level: InfoLevel,
|
||||||
|
Formatter: formatter,
|
||||||
|
}
|
||||||
|
entry := logger.WithFields(fields)
|
||||||
|
b.RunParallel(func(pb *testing.PB) {
|
||||||
|
for pb.Next() {
|
||||||
|
entry.Info("aaa")
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func doLoggerBenchmarkNoLock(b *testing.B, out *os.File, formatter Formatter, fields Fields) {
|
||||||
|
logger := Logger{
|
||||||
|
Out: out,
|
||||||
|
Level: InfoLevel,
|
||||||
|
Formatter: formatter,
|
||||||
|
}
|
||||||
|
logger.SetNoLock()
|
||||||
|
entry := logger.WithFields(fields)
|
||||||
|
b.RunParallel(func(pb *testing.PB) {
|
||||||
|
for pb.Next() {
|
||||||
|
entry.Info("aaa")
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|

143 vendor/src/github.com/Sirupsen/logrus/logrus.go vendored Normal file
@@ -0,0 +1,143 @@
package logrus

import (
	"fmt"
	"log"
	"strings"
)

// Fields type, used to pass to `WithFields`.
type Fields map[string]interface{}

// Level type
type Level uint8

// Convert the Level to a string. E.g. PanicLevel becomes "panic".
func (level Level) String() string {
	switch level {
	case DebugLevel:
		return "debug"
	case InfoLevel:
		return "info"
	case WarnLevel:
		return "warning"
	case ErrorLevel:
		return "error"
	case FatalLevel:
		return "fatal"
	case PanicLevel:
		return "panic"
	}

	return "unknown"
}

// ParseLevel takes a string level and returns the Logrus log level constant.
func ParseLevel(lvl string) (Level, error) {
	switch strings.ToLower(lvl) {
	case "panic":
		return PanicLevel, nil
	case "fatal":
		return FatalLevel, nil
	case "error":
		return ErrorLevel, nil
	case "warn", "warning":
		return WarnLevel, nil
	case "info":
		return InfoLevel, nil
	case "debug":
		return DebugLevel, nil
	}

	var l Level
	return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
}

// A constant exposing all logging levels
var AllLevels = []Level{
	PanicLevel,
	FatalLevel,
	ErrorLevel,
	WarnLevel,
	InfoLevel,
	DebugLevel,
}

// These are the different logging levels. You can set the logging level to log
// on your instance of logger, obtained with `logrus.New()`.
const (
	// PanicLevel level, highest level of severity. Logs and then calls panic with the
	// message passed to Debug, Info, ...
	PanicLevel Level = iota
	// FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
	// logging level is set to Panic.
	FatalLevel
	// ErrorLevel level. Logs. Used for errors that should definitely be noted.
	// Commonly used for hooks to send errors to an error tracking service.
	ErrorLevel
	// WarnLevel level. Non-critical entries that deserve eyes.
	WarnLevel
	// InfoLevel level. General operational entries about what's going on inside the
	// application.
	InfoLevel
	// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
	DebugLevel
)

// Won't compile if StdLogger can't be realized by a log.Logger
var (
	_ StdLogger = &log.Logger{}
	_ StdLogger = &Entry{}
	_ StdLogger = &Logger{}
)

// StdLogger is what your logrus-enabled library should take, that way
// it'll accept a stdlib logger and a logrus logger. There's no standard
// interface, this is the closest we get, unfortunately.
type StdLogger interface {
	Print(...interface{})
	Printf(string, ...interface{})
	Println(...interface{})

	Fatal(...interface{})
	Fatalf(string, ...interface{})
	Fatalln(...interface{})

	Panic(...interface{})
	Panicf(string, ...interface{})
	Panicln(...interface{})
}

// The FieldLogger interface generalizes the Entry and Logger types
type FieldLogger interface {
	WithField(key string, value interface{}) *Entry
	WithFields(fields Fields) *Entry
	WithError(err error) *Entry

	Debugf(format string, args ...interface{})
	Infof(format string, args ...interface{})
	Printf(format string, args ...interface{})
	Warnf(format string, args ...interface{})
	Warningf(format string, args ...interface{})
	Errorf(format string, args ...interface{})
	Fatalf(format string, args ...interface{})
	Panicf(format string, args ...interface{})

	Debug(args ...interface{})
	Info(args ...interface{})
	Print(args ...interface{})
	Warn(args ...interface{})
	Warning(args ...interface{})
	Error(args ...interface{})
	Fatal(args ...interface{})
	Panic(args ...interface{})

	Debugln(args ...interface{})
	Infoln(args ...interface{})
	Println(args ...interface{})
	Warnln(args ...interface{})
	Warningln(args ...interface{})
	Errorln(args ...interface{})
	Fatalln(args ...interface{})
	Panicln(args ...interface{})
}
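
A minimal sketch of wiring `ParseLevel` above to a logger; the literal "debug" stands in for a value that would normally come from configuration:

```go
package main

import "github.com/Sirupsen/logrus"

func main() {
	// Translate a level name into a Level constant; fall back to the
	// library default (InfoLevel) when the name is unknown.
	lvl, err := logrus.ParseLevel("debug")
	if err != nil {
		lvl = logrus.InfoLevel
	}

	log := logrus.New()
	log.Level = lvl
	log.Debug("visible because the level is debug")
}
```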

361 vendor/src/github.com/Sirupsen/logrus/logrus_test.go vendored Normal file
@@ -0,0 +1,361 @@
|
||||||
|
package logrus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) {
|
||||||
|
var buffer bytes.Buffer
|
||||||
|
var fields Fields
|
||||||
|
|
||||||
|
logger := New()
|
||||||
|
logger.Out = &buffer
|
||||||
|
logger.Formatter = new(JSONFormatter)
|
||||||
|
|
||||||
|
log(logger)
|
||||||
|
|
||||||
|
err := json.Unmarshal(buffer.Bytes(), &fields)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
|
||||||
|
assertions(fields)
|
||||||
|
}
|
||||||
|
|
||||||
|
func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) {
|
||||||
|
var buffer bytes.Buffer
|
||||||
|
|
||||||
|
logger := New()
|
||||||
|
logger.Out = &buffer
|
||||||
|
logger.Formatter = &TextFormatter{
|
||||||
|
DisableColors: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
log(logger)
|
||||||
|
|
||||||
|
fields := make(map[string]string)
|
||||||
|
for _, kv := range strings.Split(buffer.String(), " ") {
|
||||||
|
if !strings.Contains(kv, "=") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
kvArr := strings.Split(kv, "=")
|
||||||
|
key := strings.TrimSpace(kvArr[0])
|
||||||
|
val := kvArr[1]
|
||||||
|
if kvArr[1][0] == '"' {
|
||||||
|
var err error
|
||||||
|
val, err = strconv.Unquote(val)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
}
|
||||||
|
fields[key] = val
|
||||||
|
}
|
||||||
|
assertions(fields)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPrint(t *testing.T) {
|
||||||
|
LogAndAssertJSON(t, func(log *Logger) {
|
||||||
|
log.Print("test")
|
||||||
|
}, func(fields Fields) {
|
||||||
|
assert.Equal(t, fields["msg"], "test")
|
||||||
|
assert.Equal(t, fields["level"], "info")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestInfo(t *testing.T) {
|
||||||
|
LogAndAssertJSON(t, func(log *Logger) {
|
||||||
|
log.Info("test")
|
||||||
|
}, func(fields Fields) {
|
||||||
|
assert.Equal(t, fields["msg"], "test")
|
||||||
|
assert.Equal(t, fields["level"], "info")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWarn(t *testing.T) {
|
||||||
|
LogAndAssertJSON(t, func(log *Logger) {
|
||||||
|
log.Warn("test")
|
||||||
|
}, func(fields Fields) {
|
||||||
|
assert.Equal(t, fields["msg"], "test")
|
||||||
|
assert.Equal(t, fields["level"], "warning")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) {
|
||||||
|
LogAndAssertJSON(t, func(log *Logger) {
|
||||||
|
log.Infoln("test", "test")
|
||||||
|
}, func(fields Fields) {
|
||||||
|
assert.Equal(t, fields["msg"], "test test")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) {
|
||||||
|
LogAndAssertJSON(t, func(log *Logger) {
|
||||||
|
log.Infoln("test", 10)
|
||||||
|
}, func(fields Fields) {
|
||||||
|
assert.Equal(t, fields["msg"], "test 10")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
|
||||||
|
LogAndAssertJSON(t, func(log *Logger) {
|
||||||
|
log.Infoln(10, 10)
|
||||||
|
}, func(fields Fields) {
|
||||||
|
assert.Equal(t, fields["msg"], "10 10")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
|
||||||
|
LogAndAssertJSON(t, func(log *Logger) {
|
||||||
|
log.Infoln(10, 10)
|
||||||
|
}, func(fields Fields) {
|
||||||
|
assert.Equal(t, fields["msg"], "10 10")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) {
	LogAndAssertJSON(t, func(log *Logger) {
		log.Info("test", 10)
	}, func(fields Fields) {
		assert.Equal(t, fields["msg"], "test10")
	})
}

func TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) {
	LogAndAssertJSON(t, func(log *Logger) {
		log.Info("test", "test")
	}, func(fields Fields) {
		assert.Equal(t, fields["msg"], "testtest")
	})
}

func TestWithFieldsShouldAllowAssignments(t *testing.T) {
	var buffer bytes.Buffer
	var fields Fields

	logger := New()
	logger.Out = &buffer
	logger.Formatter = new(JSONFormatter)

	localLog := logger.WithFields(Fields{
		"key1": "value1",
	})

	localLog.WithField("key2", "value2").Info("test")
	err := json.Unmarshal(buffer.Bytes(), &fields)
	assert.Nil(t, err)

	assert.Equal(t, "value2", fields["key2"])
	assert.Equal(t, "value1", fields["key1"])

	buffer = bytes.Buffer{}
	fields = Fields{}
	localLog.Info("test")
	err = json.Unmarshal(buffer.Bytes(), &fields)
	assert.Nil(t, err)

	_, ok := fields["key2"]
	assert.Equal(t, false, ok)
	assert.Equal(t, "value1", fields["key1"])
}

func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) {
	LogAndAssertJSON(t, func(log *Logger) {
		log.WithField("msg", "hello").Info("test")
	}, func(fields Fields) {
		assert.Equal(t, fields["msg"], "test")
	})
}

func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) {
	LogAndAssertJSON(t, func(log *Logger) {
		log.WithField("msg", "hello").Info("test")
	}, func(fields Fields) {
		assert.Equal(t, fields["msg"], "test")
		assert.Equal(t, fields["fields.msg"], "hello")
	})
}

func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) {
	LogAndAssertJSON(t, func(log *Logger) {
		log.WithField("time", "hello").Info("test")
	}, func(fields Fields) {
		assert.Equal(t, fields["fields.time"], "hello")
	})
}

func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) {
	LogAndAssertJSON(t, func(log *Logger) {
		log.WithField("level", 1).Info("test")
	}, func(fields Fields) {
		assert.Equal(t, fields["level"], "info")
		assert.Equal(t, fields["fields.level"], 1.0) // JSON has floats only
	})
}

func TestDefaultFieldsAreNotPrefixed(t *testing.T) {
	LogAndAssertText(t, func(log *Logger) {
		ll := log.WithField("herp", "derp")
		ll.Info("hello")
		ll.Info("bye")
	}, func(fields map[string]string) {
		for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} {
			if _, ok := fields[fieldName]; ok {
				t.Fatalf("should not have prefixed %q: %v", fieldName, fields)
			}
		}
	})
}

func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) {

	var buffer bytes.Buffer
	var fields Fields

	logger := New()
	logger.Out = &buffer
	logger.Formatter = new(JSONFormatter)

	llog := logger.WithField("context", "eating raw fish")

	llog.Info("looks delicious")

	err := json.Unmarshal(buffer.Bytes(), &fields)
	assert.NoError(t, err, "should have decoded first message")
	assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields")
	assert.Equal(t, fields["msg"], "looks delicious")
	assert.Equal(t, fields["context"], "eating raw fish")

	buffer.Reset()

	llog.Warn("omg it is!")

	err = json.Unmarshal(buffer.Bytes(), &fields)
	assert.NoError(t, err, "should have decoded second message")
	assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields")
	assert.Equal(t, fields["msg"], "omg it is!")
	assert.Equal(t, fields["context"], "eating raw fish")
	assert.Nil(t, fields["fields.msg"], "should not have prefixed previous `msg` entry")

}

func TestConvertLevelToString(t *testing.T) {
	assert.Equal(t, "debug", DebugLevel.String())
	assert.Equal(t, "info", InfoLevel.String())
	assert.Equal(t, "warning", WarnLevel.String())
	assert.Equal(t, "error", ErrorLevel.String())
	assert.Equal(t, "fatal", FatalLevel.String())
	assert.Equal(t, "panic", PanicLevel.String())
}

func TestParseLevel(t *testing.T) {
	l, err := ParseLevel("panic")
	assert.Nil(t, err)
	assert.Equal(t, PanicLevel, l)

	l, err = ParseLevel("PANIC")
	assert.Nil(t, err)
	assert.Equal(t, PanicLevel, l)

	l, err = ParseLevel("fatal")
	assert.Nil(t, err)
	assert.Equal(t, FatalLevel, l)

	l, err = ParseLevel("FATAL")
	assert.Nil(t, err)
	assert.Equal(t, FatalLevel, l)

	l, err = ParseLevel("error")
	assert.Nil(t, err)
	assert.Equal(t, ErrorLevel, l)

	l, err = ParseLevel("ERROR")
	assert.Nil(t, err)
	assert.Equal(t, ErrorLevel, l)

	l, err = ParseLevel("warn")
	assert.Nil(t, err)
	assert.Equal(t, WarnLevel, l)

	l, err = ParseLevel("WARN")
	assert.Nil(t, err)
	assert.Equal(t, WarnLevel, l)

	l, err = ParseLevel("warning")
	assert.Nil(t, err)
	assert.Equal(t, WarnLevel, l)

	l, err = ParseLevel("WARNING")
	assert.Nil(t, err)
	assert.Equal(t, WarnLevel, l)

	l, err = ParseLevel("info")
	assert.Nil(t, err)
	assert.Equal(t, InfoLevel, l)

	l, err = ParseLevel("INFO")
	assert.Nil(t, err)
	assert.Equal(t, InfoLevel, l)

	l, err = ParseLevel("debug")
	assert.Nil(t, err)
	assert.Equal(t, DebugLevel, l)

	l, err = ParseLevel("DEBUG")
	assert.Nil(t, err)
	assert.Equal(t, DebugLevel, l)

	l, err = ParseLevel("invalid")
	assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error())
}

func TestGetSetLevelRace(t *testing.T) {
	wg := sync.WaitGroup{}
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			if i%2 == 0 {
				SetLevel(InfoLevel)
			} else {
				GetLevel()
			}
		}(i)

	}
	wg.Wait()
}

func TestLoggingRace(t *testing.T) {
	logger := New()

	var wg sync.WaitGroup
	wg.Add(100)

	for i := 0; i < 100; i++ {
		go func() {
			logger.Info("info")
			wg.Done()
		}()
	}
	wg.Wait()
}

// Compile test
func TestLogrusInterface(t *testing.T) {
	var buffer bytes.Buffer
	fn := func(l FieldLogger) {
		b := l.WithField("key", "value")
		b.Debug("Test")
	}
	// test logger
	logger := New()
	logger.Out = &buffer
	fn(logger)

	// test Entry
	e := logger.WithField("another", "value")
	fn(e)
}
8
vendor/src/github.com/Sirupsen/logrus/terminal_appengine.go
vendored
Normal file
@ -0,0 +1,8 @@
// +build appengine

package logrus

// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal() bool {
	return true
}
10
vendor/src/github.com/Sirupsen/logrus/terminal_bsd.go
vendored
Normal file
@ -0,0 +1,10 @@
// +build darwin freebsd openbsd netbsd dragonfly
// +build !appengine

package logrus

import "syscall"

const ioctlReadTermios = syscall.TIOCGETA

type Termios syscall.Termios
14
vendor/src/github.com/Sirupsen/logrus/terminal_linux.go
vendored
Normal file
@ -0,0 +1,14 @@
// Based on ssh/terminal:
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !appengine

package logrus

import "syscall"

const ioctlReadTermios = syscall.TCGETS

type Termios syscall.Termios
22
vendor/src/github.com/Sirupsen/logrus/terminal_notwindows.go
vendored
Normal file
@ -0,0 +1,22 @@
// Based on ssh/terminal:
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build linux darwin freebsd openbsd netbsd dragonfly
// +build !appengine

package logrus

import (
	"syscall"
	"unsafe"
)

// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal() bool {
	fd := syscall.Stderr
	var termios Termios
	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
	return err == 0
}
15
vendor/src/github.com/Sirupsen/logrus/terminal_solaris.go
vendored
Normal file
@ -0,0 +1,15 @@
// +build solaris,!appengine

package logrus

import (
	"os"

	"golang.org/x/sys/unix"
)

// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal() bool {
	_, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA)
	return err == nil
}
27
vendor/src/github.com/Sirupsen/logrus/terminal_windows.go
vendored
Normal file
@ -0,0 +1,27 @@
// Based on ssh/terminal:
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build windows,!appengine

package logrus

import (
	"syscall"
	"unsafe"
)

var kernel32 = syscall.NewLazyDLL("kernel32.dll")

var (
	procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
)

// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal() bool {
	fd := syscall.Stderr
	var st uint32
	r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
	return r != 0 && e == 0
}
166
vendor/src/github.com/Sirupsen/logrus/text_formatter.go
vendored
Normal file
@ -0,0 +1,166 @@
package logrus

import (
	"bytes"
	"fmt"
	"runtime"
	"sort"
	"strings"
	"time"
)

const (
	nocolor = 0
	red     = 31
	green   = 32
	yellow  = 33
	blue    = 34
	gray    = 37
)

var (
	baseTimestamp time.Time
	isTerminal    bool
)

func init() {
	baseTimestamp = time.Now()
	isTerminal = IsTerminal()
}

type TextFormatter struct {
	// Set to true to bypass checking for a TTY before outputting colors.
	ForceColors bool

	// Force disabling colors.
	DisableColors bool

	// Disable timestamp logging. useful when output is redirected to logging
	// system that already adds timestamps.
	DisableTimestamp bool

	// Enable logging the full timestamp when a TTY is attached instead of just
	// the time passed since beginning of execution.
	FullTimestamp bool

	// TimestampFormat to use for display when a full timestamp is printed
	TimestampFormat string

	// The fields are sorted by default for a consistent output. For applications
	// that log extremely frequently and don't use the JSON formatter this may not
	// be desired.
	DisableSorting bool
}

func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
	var b *bytes.Buffer
	keys := make([]string, 0, len(entry.Data))
	for k := range entry.Data {
		keys = append(keys, k)
	}

	if !f.DisableSorting {
		sort.Strings(keys)
	}
	if entry.Buffer != nil {
		b = entry.Buffer
	} else {
		b = &bytes.Buffer{}
	}

	prefixFieldClashes(entry.Data)

	isColorTerminal := isTerminal && (runtime.GOOS != "windows")
	isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors

	timestampFormat := f.TimestampFormat
	if timestampFormat == "" {
		timestampFormat = DefaultTimestampFormat
	}
	if isColored {
		f.printColored(b, entry, keys, timestampFormat)
	} else {
		if !f.DisableTimestamp {
			f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
		}
		f.appendKeyValue(b, "level", entry.Level.String())
		if entry.Message != "" {
			f.appendKeyValue(b, "msg", entry.Message)
		}
		for _, key := range keys {
			f.appendKeyValue(b, key, entry.Data[key])
		}
	}

	b.WriteByte('\n')
	return b.Bytes(), nil
}

func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
	var levelColor int
	switch entry.Level {
	case DebugLevel:
		levelColor = gray
	case WarnLevel:
		levelColor = yellow
	case ErrorLevel, FatalLevel, PanicLevel:
		levelColor = red
	default:
		levelColor = blue
	}

	levelText := strings.ToUpper(entry.Level.String())[0:4]

	if f.DisableTimestamp {
		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message)
	} else if !f.FullTimestamp {
		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message)
	} else {
		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
	}
	for _, k := range keys {
		v := entry.Data[k]
		fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
		f.appendValue(b, v)
	}
}

func needsQuoting(text string) bool {
	for _, ch := range text {
		if !((ch >= 'a' && ch <= 'z') ||
			(ch >= 'A' && ch <= 'Z') ||
			(ch >= '0' && ch <= '9') ||
			ch == '-' || ch == '.') {
			return true
		}
	}
	return false
}

func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {

	b.WriteString(key)
	b.WriteByte('=')
	f.appendValue(b, value)
	b.WriteByte(' ')
}

func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
	switch value := value.(type) {
	case string:
		if !needsQuoting(value) {
			b.WriteString(value)
		} else {
			fmt.Fprintf(b, "%q", value)
		}
	case error:
		errmsg := value.Error()
		if !needsQuoting(errmsg) {
			b.WriteString(errmsg)
		} else {
			fmt.Fprintf(b, "%q", errmsg)
		}
	default:
		fmt.Fprint(b, value)
	}
}
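The struct fields above are the only switches the text formatter exposes; as a rough usage sketch (not part of this vendored diff, assuming the package is imported under its vendored path github.com/Sirupsen/logrus), a consumer would configure it like this:

package main

import "github.com/Sirupsen/logrus"

func main() {
	log := logrus.New()
	// Full timestamps instead of the seconds-since-start counter,
	// and no ANSI colors even when stderr is a TTY.
	log.Formatter = &logrus.TextFormatter{
		FullTimestamp:   true,
		DisableColors:   true,
		TimestampFormat: "2006-01-02 15:04:05",
	}
	log.WithField("component", "example").Info("formatter configured")
}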
71
vendor/src/github.com/Sirupsen/logrus/text_formatter_test.go
vendored
Normal file
@ -0,0 +1,71 @@
package logrus

import (
	"bytes"
	"errors"
	"testing"
	"time"
	"strings"
)

func TestQuoting(t *testing.T) {
	tf := &TextFormatter{DisableColors: true}

	checkQuoting := func(q bool, value interface{}) {
		b, _ := tf.Format(WithField("test", value))
		idx := bytes.Index(b, ([]byte)("test="))
		cont := bytes.Contains(b[idx+5:], []byte{'"'})
		if cont != q {
			if q {
				t.Errorf("quoting expected for: %#v", value)
			} else {
				t.Errorf("quoting not expected for: %#v", value)
			}
		}
	}

	checkQuoting(false, "abcd")
	checkQuoting(false, "v1.0")
	checkQuoting(false, "1234567890")
	checkQuoting(true, "/foobar")
	checkQuoting(true, "x y")
	checkQuoting(true, "x,y")
	checkQuoting(false, errors.New("invalid"))
	checkQuoting(true, errors.New("invalid argument"))
}

func TestTimestampFormat(t *testing.T) {
	checkTimeStr := func(format string) {
		customFormatter := &TextFormatter{DisableColors: true, TimestampFormat: format}
		customStr, _ := customFormatter.Format(WithField("test", "test"))
		timeStart := bytes.Index(customStr, ([]byte)("time="))
		timeEnd := bytes.Index(customStr, ([]byte)("level="))
		timeStr := customStr[timeStart+5 : timeEnd-1]
		if timeStr[0] == '"' && timeStr[len(timeStr)-1] == '"' {
			timeStr = timeStr[1 : len(timeStr)-1]
		}
		if format == "" {
			format = time.RFC3339
		}
		_, e := time.Parse(format, (string)(timeStr))
		if e != nil {
			t.Errorf("time string \"%s\" did not match provided time format \"%s\": %s", timeStr, format, e)
		}
	}

	checkTimeStr("2006-01-02T15:04:05.000000000Z07:00")
	checkTimeStr("Mon Jan _2 15:04:05 2006")
	checkTimeStr("")
}

func TestDisableTimestampWithColoredOutput(t *testing.T) {
	tf := &TextFormatter{DisableTimestamp: true, ForceColors: true}

	b, _ := tf.Format(WithField("test", "test"))
	if strings.Contains(string(b), "[0000]") {
		t.Error("timestamp not expected when DisableTimestamp is true")
	}
}

// TODO add tests for sorting etc., this requires a parser for the text
// formatter output.
53
vendor/src/github.com/Sirupsen/logrus/writer.go
vendored
Normal file
@ -0,0 +1,53 @@
package logrus

import (
	"bufio"
	"io"
	"runtime"
)

func (logger *Logger) Writer() *io.PipeWriter {
	return logger.WriterLevel(InfoLevel)
}

func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
	reader, writer := io.Pipe()

	var printFunc func(args ...interface{})
	switch level {
	case DebugLevel:
		printFunc = logger.Debug
	case InfoLevel:
		printFunc = logger.Info
	case WarnLevel:
		printFunc = logger.Warn
	case ErrorLevel:
		printFunc = logger.Error
	case FatalLevel:
		printFunc = logger.Fatal
	case PanicLevel:
		printFunc = logger.Panic
	default:
		printFunc = logger.Print
	}

	go logger.writerScanner(reader, printFunc)
	runtime.SetFinalizer(writer, writerFinalizer)

	return writer
}

func (logger *Logger) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
	scanner := bufio.NewScanner(reader)
	for scanner.Scan() {
		printFunc(scanner.Text())
	}
	if err := scanner.Err(); err != nil {
		logger.Errorf("Error while reading from Writer: %s", err)
	}
	reader.Close()
}

func writerFinalizer(writer *io.PipeWriter) {
	writer.Close()
}
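Writer and WriterLevel return an ordinary *io.PipeWriter, so anything that writes lines can be funneled into logrus; a minimal sketch (not part of the vendored file, assuming the same vendored import path) is pointing the standard library logger at it:

package main

import (
	"log"

	"github.com/Sirupsen/logrus"
)

func main() {
	logger := logrus.New()
	w := logger.WriterLevel(logrus.WarnLevel)
	// Closing the pipe stops the background scanner goroutine.
	defer w.Close()

	// Every line written through the standard library logger now comes out
	// as a logrus warning entry.
	log.SetOutput(w)
	log.Println("legacy message routed through logrus")
}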
63
vendor/src/github.com/beorn7/perks/quantile/bench_test.go
vendored
Normal file
@ -0,0 +1,63 @@
package quantile

import (
	"testing"
)

func BenchmarkInsertTargeted(b *testing.B) {
	b.ReportAllocs()

	s := NewTargeted(Targets)
	b.ResetTimer()
	for i := float64(0); i < float64(b.N); i++ {
		s.Insert(i)
	}
}

func BenchmarkInsertTargetedSmallEpsilon(b *testing.B) {
	s := NewTargeted(TargetsSmallEpsilon)
	b.ResetTimer()
	for i := float64(0); i < float64(b.N); i++ {
		s.Insert(i)
	}
}

func BenchmarkInsertBiased(b *testing.B) {
	s := NewLowBiased(0.01)
	b.ResetTimer()
	for i := float64(0); i < float64(b.N); i++ {
		s.Insert(i)
	}
}

func BenchmarkInsertBiasedSmallEpsilon(b *testing.B) {
	s := NewLowBiased(0.0001)
	b.ResetTimer()
	for i := float64(0); i < float64(b.N); i++ {
		s.Insert(i)
	}
}

func BenchmarkQuery(b *testing.B) {
	s := NewTargeted(Targets)
	for i := float64(0); i < 1e6; i++ {
		s.Insert(i)
	}
	b.ResetTimer()
	n := float64(b.N)
	for i := float64(0); i < n; i++ {
		s.Query(i / n)
	}
}

func BenchmarkQuerySmallEpsilon(b *testing.B) {
	s := NewTargeted(TargetsSmallEpsilon)
	for i := float64(0); i < 1e6; i++ {
		s.Insert(i)
	}
	b.ResetTimer()
	n := float64(b.N)
	for i := float64(0); i < n; i++ {
		s.Query(i / n)
	}
}
121
vendor/src/github.com/beorn7/perks/quantile/example_test.go
vendored
Normal file
@ -0,0 +1,121 @@
// +build go1.1

package quantile_test

import (
	"bufio"
	"fmt"
	"log"
	"os"
	"strconv"
	"time"

	"github.com/beorn7/perks/quantile"
)

func Example_simple() {
	ch := make(chan float64)
	go sendFloats(ch)

	// Compute the 50th, 90th, and 99th percentile.
	q := quantile.NewTargeted(map[float64]float64{
		0.50: 0.005,
		0.90: 0.001,
		0.99: 0.0001,
	})
	for v := range ch {
		q.Insert(v)
	}

	fmt.Println("perc50:", q.Query(0.50))
	fmt.Println("perc90:", q.Query(0.90))
	fmt.Println("perc99:", q.Query(0.99))
	fmt.Println("count:", q.Count())
	// Output:
	// perc50: 5
	// perc90: 16
	// perc99: 223
	// count: 2388
}

func Example_mergeMultipleStreams() {
	// Scenario:
	// We have multiple database shards. On each shard, there is a process
	// collecting query response times from the database logs and inserting
	// them into a Stream (created via NewTargeted(0.90)), much like the
	// Simple example. These processes expose a network interface for us to
	// ask them to serialize and send us the results of their
	// Stream.Samples so we may Merge and Query them.
	//
	// NOTES:
	// * These sample sets are small, allowing us to get them
	// across the network much faster than sending the entire list of data
	// points.
	//
	// * For this to work correctly, we must supply the same quantiles
	// a priori the process collecting the samples supplied to NewTargeted,
	// even if we do not plan to query them all here.
	ch := make(chan quantile.Samples)
	getDBQuerySamples(ch)
	q := quantile.NewTargeted(map[float64]float64{0.90: 0.001})
	for samples := range ch {
		q.Merge(samples)
	}
	fmt.Println("perc90:", q.Query(0.90))
}

func Example_window() {
	// Scenario: We want the 90th, 95th, and 99th percentiles for each
	// minute.

	ch := make(chan float64)
	go sendStreamValues(ch)

	tick := time.NewTicker(1 * time.Minute)
	q := quantile.NewTargeted(map[float64]float64{
		0.90: 0.001,
		0.95: 0.0005,
		0.99: 0.0001,
	})
	for {
		select {
		case t := <-tick.C:
			flushToDB(t, q.Samples())
			q.Reset()
		case v := <-ch:
			q.Insert(v)
		}
	}
}

func sendStreamValues(ch chan float64) {
	// Use your imagination
}

func flushToDB(t time.Time, samples quantile.Samples) {
	// Use your imagination
}

// This is a stub for the above example. In reality this would hit the remote
// servers via http or something like it.
func getDBQuerySamples(ch chan quantile.Samples) {}

func sendFloats(ch chan<- float64) {
	f, err := os.Open("exampledata.txt")
	if err != nil {
		log.Fatal(err)
	}
	sc := bufio.NewScanner(f)
	for sc.Scan() {
		b := sc.Bytes()
		v, err := strconv.ParseFloat(string(b), 64)
		if err != nil {
			log.Fatal(err)
		}
		ch <- v
	}
	if sc.Err() != nil {
		log.Fatal(sc.Err())
	}
	close(ch)
}
2388
vendor/src/github.com/beorn7/perks/quantile/exampledata.txt
vendored
Normal file
File diff suppressed because it is too large
292
vendor/src/github.com/beorn7/perks/quantile/stream.go
vendored
Normal file
@ -0,0 +1,292 @@
// Package quantile computes approximate quantiles over an unbounded data
// stream within low memory and CPU bounds.
//
// A small amount of accuracy is traded to achieve the above properties.
//
// Multiple streams can be merged before calling Query to generate a single set
// of results. This is meaningful when the streams represent the same type of
// data. See Merge and Samples.
//
// For more detailed information about the algorithm used, see:
//
// Effective Computation of Biased Quantiles over Data Streams
//
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
package quantile

import (
	"math"
	"sort"
)

// Sample holds an observed value and meta information for compression. JSON
// tags have been added for convenience.
type Sample struct {
	Value float64 `json:",string"`
	Width float64 `json:",string"`
	Delta float64 `json:",string"`
}

// Samples represents a slice of samples. It implements sort.Interface.
type Samples []Sample

func (a Samples) Len() int           { return len(a) }
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
func (a Samples) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

type invariant func(s *stream, r float64) float64

// NewLowBiased returns an initialized Stream for low-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the lower ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewLowBiased(epsilon float64) *Stream {
	ƒ := func(s *stream, r float64) float64 {
		return 2 * epsilon * r
	}
	return newStream(ƒ)
}

// NewHighBiased returns an initialized Stream for high-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the higher ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewHighBiased(epsilon float64) *Stream {
	ƒ := func(s *stream, r float64) float64 {
		return 2 * epsilon * (s.n - r)
	}
	return newStream(ƒ)
}

// NewTargeted returns an initialized Stream concerned with a particular set of
// quantile values that are supplied a priori. Knowing these a priori reduces
// space and computation time. The targets map maps the desired quantiles to
// their absolute errors, i.e. the true quantile of a value returned by a query
// is guaranteed to be within (Quantile±Epsilon).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
func NewTargeted(targets map[float64]float64) *Stream {
	ƒ := func(s *stream, r float64) float64 {
		var m = math.MaxFloat64
		var f float64
		for quantile, epsilon := range targets {
			if quantile*s.n <= r {
				f = (2 * epsilon * r) / quantile
			} else {
				f = (2 * epsilon * (s.n - r)) / (1 - quantile)
			}
			if f < m {
				m = f
			}
		}
		return m
	}
	return newStream(ƒ)
}

// Stream computes quantiles for a stream of float64s. It is not thread-safe by
// design. Take care when using across multiple goroutines.
type Stream struct {
	*stream
	b      Samples
	sorted bool
}

func newStream(ƒ invariant) *Stream {
	x := &stream{ƒ: ƒ}
	return &Stream{x, make(Samples, 0, 500), true}
}

// Insert inserts v into the stream.
func (s *Stream) Insert(v float64) {
	s.insert(Sample{Value: v, Width: 1})
}

func (s *Stream) insert(sample Sample) {
	s.b = append(s.b, sample)
	s.sorted = false
	if len(s.b) == cap(s.b) {
		s.flush()
	}
}

// Query returns the computed qth percentiles value. If s was created with
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
// will return an unspecified result.
func (s *Stream) Query(q float64) float64 {
	if !s.flushed() {
		// Fast path when there hasn't been enough data for a flush;
		// this also yields better accuracy for small sets of data.
		l := len(s.b)
		if l == 0 {
			return 0
		}
		i := int(math.Ceil(float64(l) * q))
		if i > 0 {
			i -= 1
		}
		s.maybeSort()
		return s.b[i].Value
	}
	s.flush()
	return s.stream.query(q)
}

// Merge merges samples into the underlying streams samples. This is handy when
// merging multiple streams from separate threads, database shards, etc.
//
// ATTENTION: This method is broken and does not yield correct results. The
// underlying algorithm is not capable of merging streams correctly.
func (s *Stream) Merge(samples Samples) {
	sort.Sort(samples)
	s.stream.merge(samples)
}

// Reset reinitializes and clears the list reusing the samples buffer memory.
func (s *Stream) Reset() {
	s.stream.reset()
	s.b = s.b[:0]
}

// Samples returns stream samples held by s.
func (s *Stream) Samples() Samples {
	if !s.flushed() {
		return s.b
	}
	s.flush()
	return s.stream.samples()
}

// Count returns the total number of samples observed in the stream
// since initialization.
func (s *Stream) Count() int {
	return len(s.b) + s.stream.count()
}

func (s *Stream) flush() {
	s.maybeSort()
	s.stream.merge(s.b)
	s.b = s.b[:0]
}

func (s *Stream) maybeSort() {
	if !s.sorted {
		s.sorted = true
		sort.Sort(s.b)
	}
}

func (s *Stream) flushed() bool {
	return len(s.stream.l) > 0
}

type stream struct {
	n float64
	l []Sample
	ƒ invariant
}

func (s *stream) reset() {
	s.l = s.l[:0]
	s.n = 0
}

func (s *stream) insert(v float64) {
	s.merge(Samples{{v, 1, 0}})
}

func (s *stream) merge(samples Samples) {
	// TODO(beorn7): This tries to merge not only individual samples, but
	// whole summaries. The paper doesn't mention merging summaries at
	// all. Unittests show that the merging is inaccurate. Find out how to
	// do merges properly.
	var r float64
	i := 0
	for _, sample := range samples {
		for ; i < len(s.l); i++ {
			c := s.l[i]
			if c.Value > sample.Value {
				// Insert at position i.
				s.l = append(s.l, Sample{})
				copy(s.l[i+1:], s.l[i:])
				s.l[i] = Sample{
					sample.Value,
					sample.Width,
					math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
					// TODO(beorn7): How to calculate delta correctly?
				}
				i++
				goto inserted
			}
			r += c.Width
		}
		s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
		i++
	inserted:
		s.n += sample.Width
		r += sample.Width
	}
	s.compress()
}

func (s *stream) count() int {
	return int(s.n)
}

func (s *stream) query(q float64) float64 {
	t := math.Ceil(q * s.n)
	t += math.Ceil(s.ƒ(s, t) / 2)
	p := s.l[0]
	var r float64
	for _, c := range s.l[1:] {
		r += p.Width
		if r+c.Width+c.Delta > t {
			return p.Value
		}
		p = c
	}
	return p.Value
}

func (s *stream) compress() {
	if len(s.l) < 2 {
		return
	}
	x := s.l[len(s.l)-1]
	xi := len(s.l) - 1
	r := s.n - 1 - x.Width

	for i := len(s.l) - 2; i >= 0; i-- {
		c := s.l[i]
		if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
			x.Width += c.Width
			s.l[xi] = x
			// Remove element at i.
			copy(s.l[i:], s.l[i+1:])
			s.l = s.l[:len(s.l)-1]
			xi -= 1
		} else {
			x = c
			xi = i
		}
		r -= c.Width
	}
}

func (s *stream) samples() Samples {
	samples := make(Samples, len(s.l))
	copy(samples, s.l)
	return samples
}
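The Stream type above is documented as not thread-safe by design; a minimal sketch of one way to share it across goroutines (an illustrative wrapper, not part of the vendored package) is to serialize access with a mutex:

package main

import (
	"sync"

	"github.com/beorn7/perks/quantile"
)

// lockedStream serializes Insert and Query because quantile.Stream is not
// safe for concurrent use. This wrapper is illustrative only.
type lockedStream struct {
	mu sync.Mutex
	s  *quantile.Stream
}

func (l *lockedStream) Insert(v float64) {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.s.Insert(v)
}

func (l *lockedStream) Query(q float64) float64 {
	l.mu.Lock()
	defer l.mu.Unlock()
	return l.s.Query(q)
}

func main() {
	ls := &lockedStream{s: quantile.NewTargeted(map[float64]float64{0.99: 0.001})}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(start int) {
			defer wg.Done()
			for v := start; v < start+1000; v++ {
				ls.Insert(float64(v))
			}
		}(i * 1000)
	}
	wg.Wait()
	_ = ls.Query(0.99)
}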
215
vendor/src/github.com/beorn7/perks/quantile/stream_test.go
vendored
Normal file
@ -0,0 +1,215 @@
package quantile

import (
	"math"
	"math/rand"
	"sort"
	"testing"
)

var (
	Targets = map[float64]float64{
		0.01: 0.001,
		0.10: 0.01,
		0.50: 0.05,
		0.90: 0.01,
		0.99: 0.001,
	}
	TargetsSmallEpsilon = map[float64]float64{
		0.01: 0.0001,
		0.10: 0.001,
		0.50: 0.005,
		0.90: 0.001,
		0.99: 0.0001,
	}
	LowQuantiles  = []float64{0.01, 0.1, 0.5}
	HighQuantiles = []float64{0.99, 0.9, 0.5}
)

const RelativeEpsilon = 0.01

func verifyPercsWithAbsoluteEpsilon(t *testing.T, a []float64, s *Stream) {
	sort.Float64s(a)
	for quantile, epsilon := range Targets {
		n := float64(len(a))
		k := int(quantile * n)
		if k < 1 {
			k = 1
		}
		lower := int((quantile - epsilon) * n)
		if lower < 1 {
			lower = 1
		}
		upper := int(math.Ceil((quantile + epsilon) * n))
		if upper > len(a) {
			upper = len(a)
		}
		w, min, max := a[k-1], a[lower-1], a[upper-1]
		if g := s.Query(quantile); g < min || g > max {
			t.Errorf("q=%f: want %v [%f,%f], got %v", quantile, w, min, max, g)
		}
	}
}

func verifyLowPercsWithRelativeEpsilon(t *testing.T, a []float64, s *Stream) {
	sort.Float64s(a)
	for _, qu := range LowQuantiles {
		n := float64(len(a))
		k := int(qu * n)

		lowerRank := int((1 - RelativeEpsilon) * qu * n)
		upperRank := int(math.Ceil((1 + RelativeEpsilon) * qu * n))
		w, min, max := a[k-1], a[lowerRank-1], a[upperRank-1]
		if g := s.Query(qu); g < min || g > max {
			t.Errorf("q=%f: want %v [%f,%f], got %v", qu, w, min, max, g)
		}
	}
}

func verifyHighPercsWithRelativeEpsilon(t *testing.T, a []float64, s *Stream) {
	sort.Float64s(a)
	for _, qu := range HighQuantiles {
		n := float64(len(a))
		k := int(qu * n)

		lowerRank := int((1 - (1+RelativeEpsilon)*(1-qu)) * n)
		upperRank := int(math.Ceil((1 - (1-RelativeEpsilon)*(1-qu)) * n))
		w, min, max := a[k-1], a[lowerRank-1], a[upperRank-1]
		if g := s.Query(qu); g < min || g > max {
			t.Errorf("q=%f: want %v [%f,%f], got %v", qu, w, min, max, g)
		}
	}
}

func populateStream(s *Stream) []float64 {
	a := make([]float64, 0, 1e5+100)
	for i := 0; i < cap(a); i++ {
		v := rand.NormFloat64()
		// Add 5% asymmetric outliers.
		if i%20 == 0 {
			v = v*v + 1
		}
		s.Insert(v)
		a = append(a, v)
	}
	return a
}

func TestTargetedQuery(t *testing.T) {
	rand.Seed(42)
	s := NewTargeted(Targets)
	a := populateStream(s)
	verifyPercsWithAbsoluteEpsilon(t, a, s)
}

func TestTargetedQuerySmallSampleSize(t *testing.T) {
	rand.Seed(42)
	s := NewTargeted(TargetsSmallEpsilon)
	a := []float64{1, 2, 3, 4, 5}
	for _, v := range a {
		s.Insert(v)
	}
	verifyPercsWithAbsoluteEpsilon(t, a, s)
	// If not yet flushed, results should be precise:
	if !s.flushed() {
		for φ, want := range map[float64]float64{
			0.01: 1,
			0.10: 1,
			0.50: 3,
			0.90: 5,
			0.99: 5,
		} {
			if got := s.Query(φ); got != want {
				t.Errorf("want %f for φ=%f, got %f", want, φ, got)
			}
		}
	}
}

func TestLowBiasedQuery(t *testing.T) {
	rand.Seed(42)
	s := NewLowBiased(RelativeEpsilon)
	a := populateStream(s)
	verifyLowPercsWithRelativeEpsilon(t, a, s)
}

func TestHighBiasedQuery(t *testing.T) {
	rand.Seed(42)
	s := NewHighBiased(RelativeEpsilon)
	a := populateStream(s)
	verifyHighPercsWithRelativeEpsilon(t, a, s)
}

// BrokenTestTargetedMerge is broken, see Merge doc comment.
func BrokenTestTargetedMerge(t *testing.T) {
	rand.Seed(42)
	s1 := NewTargeted(Targets)
	s2 := NewTargeted(Targets)
	a := populateStream(s1)
	a = append(a, populateStream(s2)...)
	s1.Merge(s2.Samples())
	verifyPercsWithAbsoluteEpsilon(t, a, s1)
}

// BrokenTestLowBiasedMerge is broken, see Merge doc comment.
func BrokenTestLowBiasedMerge(t *testing.T) {
	rand.Seed(42)
	s1 := NewLowBiased(RelativeEpsilon)
	s2 := NewLowBiased(RelativeEpsilon)
	a := populateStream(s1)
	a = append(a, populateStream(s2)...)
	s1.Merge(s2.Samples())
	verifyLowPercsWithRelativeEpsilon(t, a, s2)
}

// BrokenTestHighBiasedMerge is broken, see Merge doc comment.
func BrokenTestHighBiasedMerge(t *testing.T) {
	rand.Seed(42)
	s1 := NewHighBiased(RelativeEpsilon)
	s2 := NewHighBiased(RelativeEpsilon)
	a := populateStream(s1)
	a = append(a, populateStream(s2)...)
	s1.Merge(s2.Samples())
	verifyHighPercsWithRelativeEpsilon(t, a, s2)
}

func TestUncompressed(t *testing.T) {
	q := NewTargeted(Targets)
	for i := 100; i > 0; i-- {
		q.Insert(float64(i))
	}
	if g := q.Count(); g != 100 {
		t.Errorf("want count 100, got %d", g)
	}
	// Before compression, Query should have 100% accuracy.
	for quantile := range Targets {
		w := quantile * 100
		if g := q.Query(quantile); g != w {
			t.Errorf("want %f, got %f", w, g)
		}
	}
}

func TestUncompressedSamples(t *testing.T) {
	q := NewTargeted(map[float64]float64{0.99: 0.001})
	for i := 1; i <= 100; i++ {
		q.Insert(float64(i))
	}
	if g := q.Samples().Len(); g != 100 {
		t.Errorf("want count 100, got %d", g)
	}
}

func TestUncompressedOne(t *testing.T) {
	q := NewTargeted(map[float64]float64{0.99: 0.01})
	q.Insert(3.14)
	if g := q.Query(0.90); g != 3.14 {
		t.Error("want PI, got", g)
	}
}

func TestDefaults(t *testing.T) {
	if g := NewTargeted(map[float64]float64{0.99: 0.001}).Query(0.99); g != 0 {
		t.Errorf("want 0, got %f", g)
	}
}
43
vendor/src/github.com/golang/protobuf/proto/Makefile
vendored
Normal file
@ -0,0 +1,43 @@
# Go support for Protocol Buffers - Google's data interchange format
#
# Copyright 2010 The Go Authors. All rights reserved.
# https://github.com/golang/protobuf
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

install:
	go install

test: install generate-test-pbs
	go test


generate-test-pbs:
	make install
	make -C testdata
	protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto
	make
2278
vendor/src/github.com/golang/protobuf/proto/all_test.go
vendored
Normal file
File diff suppressed because it is too large
300
vendor/src/github.com/golang/protobuf/proto/any_test.go
vendored
Normal file
@ -0,0 +1,300 @@
|
||||||
|
// Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
//
|
||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// https://github.com/golang/protobuf
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
package proto_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/golang/protobuf/proto"
|
||||||
|
|
||||||
|
pb "github.com/golang/protobuf/proto/proto3_proto"
|
||||||
|
testpb "github.com/golang/protobuf/proto/testdata"
|
||||||
|
anypb "github.com/golang/protobuf/ptypes/any"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
expandedMarshaler = proto.TextMarshaler{ExpandAny: true}
|
||||||
|
expandedCompactMarshaler = proto.TextMarshaler{Compact: true, ExpandAny: true}
|
||||||
|
)
|
||||||
|
|
||||||
|
// anyEqual reports whether two messages which may be google.protobuf.Any or may
|
||||||
|
// contain google.protobuf.Any fields are equal. We can't use proto.Equal for
|
||||||
|
// comparison, because semantically equivalent messages may be marshaled to
|
||||||
|
// binary in different tag order. Instead, trust that TextMarshaler with
|
||||||
|
// ExpandAny option works and compare the text marshaling results.
|
||||||
|
func anyEqual(got, want proto.Message) bool {
|
||||||
|
// if messages are proto.Equal, no need to marshal.
|
||||||
|
if proto.Equal(got, want) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
g := expandedMarshaler.Text(got)
|
||||||
|
w := expandedMarshaler.Text(want)
|
||||||
|
return g == w
|
||||||
|
}
|
||||||
|
|
||||||
|
type golden struct {
|
||||||
|
m proto.Message
|
||||||
|
t, c string
|
||||||
|
}
|
||||||
|
|
||||||
|
var goldenMessages = makeGolden()
|
||||||
|
|
||||||
|
func makeGolden() []golden {
|
||||||
|
nested := &pb.Nested{Bunny: "Monty"}
|
||||||
|
nb, err := proto.Marshal(nested)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
m1 := &pb.Message{
|
||||||
|
Name: "David",
|
||||||
|
ResultCount: 47,
|
||||||
|
Anything: &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(nested), Value: nb},
|
||||||
|
}
|
||||||
|
m2 := &pb.Message{
|
||||||
|
Name: "David",
|
||||||
|
ResultCount: 47,
|
||||||
|
Anything: &anypb.Any{TypeUrl: "http://[::1]/type.googleapis.com/" + proto.MessageName(nested), Value: nb},
|
||||||
|
}
|
||||||
|
m3 := &pb.Message{
|
||||||
|
Name: "David",
|
||||||
|
ResultCount: 47,
|
||||||
|
Anything: &anypb.Any{TypeUrl: `type.googleapis.com/"/` + proto.MessageName(nested), Value: nb},
|
||||||
|
}
|
||||||
|
m4 := &pb.Message{
|
||||||
|
Name: "David",
|
||||||
|
ResultCount: 47,
|
||||||
|
Anything: &anypb.Any{TypeUrl: "type.googleapis.com/a/path/" + proto.MessageName(nested), Value: nb},
|
||||||
|
}
|
||||||
|
m5 := &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(nested), Value: nb}
|
||||||
|
|
||||||
|
any1 := &testpb.MyMessage{Count: proto.Int32(47), Name: proto.String("David")}
|
||||||
|
proto.SetExtension(any1, testpb.E_Ext_More, &testpb.Ext{Data: proto.String("foo")})
|
||||||
|
proto.SetExtension(any1, testpb.E_Ext_Text, proto.String("bar"))
|
||||||
|
any1b, err := proto.Marshal(any1)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
any2 := &testpb.MyMessage{Count: proto.Int32(42), Bikeshed: testpb.MyMessage_GREEN.Enum(), RepBytes: [][]byte{[]byte("roboto")}}
|
||||||
|
proto.SetExtension(any2, testpb.E_Ext_More, &testpb.Ext{Data: proto.String("baz")})
|
||||||
|
any2b, err := proto.Marshal(any2)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
m6 := &pb.Message{
|
||||||
|
Name: "David",
|
||||||
|
ResultCount: 47,
|
||||||
|
Anything: &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any1), Value: any1b},
|
||||||
|
ManyThings: []*anypb.Any{
|
||||||
|
&anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any2), Value: any2b},
|
||||||
|
&anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any1), Value: any1b},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
m1Golden = `
|
||||||
|
name: "David"
|
||||||
|
result_count: 47
|
||||||
|
anything: <
|
||||||
|
[type.googleapis.com/proto3_proto.Nested]: <
|
||||||
|
bunny: "Monty"
|
||||||
|
>
|
||||||
|
>
|
||||||
|
`
|
||||||
|
m2Golden = `
|
||||||
|
name: "David"
|
||||||
|
result_count: 47
|
||||||
|
anything: <
|
||||||
|
["http://[::1]/type.googleapis.com/proto3_proto.Nested"]: <
|
||||||
|
bunny: "Monty"
|
||||||
|
>
|
||||||
|
>
|
||||||
|
`
|
||||||
|
m3Golden = `
|
||||||
|
name: "David"
|
||||||
|
result_count: 47
|
||||||
|
anything: <
|
||||||
|
["type.googleapis.com/\"/proto3_proto.Nested"]: <
|
||||||
|
bunny: "Monty"
|
||||||
|
>
|
||||||
|
>
|
||||||
|
`
|
||||||
|
m4Golden = `
|
||||||
|
name: "David"
|
||||||
|
result_count: 47
|
||||||
|
anything: <
|
||||||
|
[type.googleapis.com/a/path/proto3_proto.Nested]: <
|
||||||
|
bunny: "Monty"
|
||||||
|
>
|
||||||
|
>
|
||||||
|
`
|
||||||
|
m5Golden = `
|
||||||
|
[type.googleapis.com/proto3_proto.Nested]: <
|
||||||
|
bunny: "Monty"
|
||||||
|
>
|
||||||
|
`
|
||||||
|
m6Golden = `
|
||||||
|
name: "David"
|
||||||
|
result_count: 47
|
||||||
|
anything: <
|
||||||
|
[type.googleapis.com/testdata.MyMessage]: <
|
||||||
|
count: 47
|
||||||
|
name: "David"
|
||||||
|
[testdata.Ext.more]: <
|
||||||
|
data: "foo"
|
||||||
|
>
|
||||||
|
[testdata.Ext.text]: "bar"
|
||||||
|
>
|
||||||
|
>
|
||||||
|
many_things: <
|
||||||
|
[type.googleapis.com/testdata.MyMessage]: <
|
||||||
|
count: 42
|
||||||
|
bikeshed: GREEN
|
||||||
|
rep_bytes: "roboto"
|
||||||
|
[testdata.Ext.more]: <
|
||||||
|
data: "baz"
|
||||||
|
>
|
||||||
|
>
|
||||||
|
>
|
||||||
|
many_things: <
|
||||||
|
[type.googleapis.com/testdata.MyMessage]: <
|
||||||
|
count: 47
|
||||||
|
name: "David"
|
||||||
|
[testdata.Ext.more]: <
|
||||||
|
data: "foo"
|
||||||
|
>
|
||||||
|
[testdata.Ext.text]: "bar"
|
||||||
|
>
|
||||||
|
>
|
||||||
|
`
|
||||||
|
)
|
||||||
|
return []golden{
|
||||||
|
{m1, strings.TrimSpace(m1Golden) + "\n", strings.TrimSpace(compact(m1Golden)) + " "},
|
||||||
|
{m2, strings.TrimSpace(m2Golden) + "\n", strings.TrimSpace(compact(m2Golden)) + " "},
|
||||||
|
{m3, strings.TrimSpace(m3Golden) + "\n", strings.TrimSpace(compact(m3Golden)) + " "},
|
||||||
|
{m4, strings.TrimSpace(m4Golden) + "\n", strings.TrimSpace(compact(m4Golden)) + " "},
|
||||||
|
{m5, strings.TrimSpace(m5Golden) + "\n", strings.TrimSpace(compact(m5Golden)) + " "},
|
||||||
|
{m6, strings.TrimSpace(m6Golden) + "\n", strings.TrimSpace(compact(m6Golden)) + " "},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMarshalGolden(t *testing.T) {
|
||||||
|
for _, tt := range goldenMessages {
|
||||||
|
if got, want := expandedMarshaler.Text(tt.m), tt.t; got != want {
|
||||||
|
t.Errorf("message %v: got:\n%s\nwant:\n%s", tt.m, got, want)
|
||||||
|
}
|
||||||
|
if got, want := expandedCompactMarshaler.Text(tt.m), tt.c; got != want {
|
||||||
|
t.Errorf("message %v: got:\n`%s`\nwant:\n`%s`", tt.m, got, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalGolden(t *testing.T) {
    for _, tt := range goldenMessages {
        want := tt.m
        got := proto.Clone(tt.m)
        got.Reset()
        if err := proto.UnmarshalText(tt.t, got); err != nil {
            t.Errorf("failed to unmarshal\n%s\nerror: %v", tt.t, err)
        }
        if !anyEqual(got, want) {
            t.Errorf("message:\n%s\ngot:\n%s\nwant:\n%s", tt.t, got, want)
        }
        got.Reset()
        if err := proto.UnmarshalText(tt.c, got); err != nil {
            t.Errorf("failed to unmarshal\n%s\nerror: %v", tt.c, err)
        }
        if !anyEqual(got, want) {
            t.Errorf("message:\n%s\ngot:\n%s\nwant:\n%s", tt.c, got, want)
        }
    }
}
|
||||||
|
|
||||||
|
func TestMarshalUnknownAny(t *testing.T) {
    m := &pb.Message{
        Anything: &anypb.Any{
            TypeUrl: "foo",
            Value:   []byte("bar"),
        },
    }
    want := `anything: <
  type_url: "foo"
  value: "bar"
>
`
    got := expandedMarshaler.Text(m)
    if got != want {
        t.Errorf("got\n`%s`\nwant\n`%s`", got, want)
    }
}
|
||||||
|
|
||||||
|
func TestAmbiguousAny(t *testing.T) {
    pb := &anypb.Any{}
    err := proto.UnmarshalText(`
type_url: "ttt/proto3_proto.Nested"
value: "\n\x05Monty"
`, pb)
    t.Logf("result: %v (error: %v)", expandedMarshaler.Text(pb), err)
    if err != nil {
        t.Errorf("failed to parse ambiguous Any message: %v", err)
    }
}
|
||||||
|
|
||||||
|
func TestUnmarshalOverwriteAny(t *testing.T) {
    pb := &anypb.Any{}
    err := proto.UnmarshalText(`
[type.googleapis.com/a/path/proto3_proto.Nested]: <
bunny: "Monty"
>
[type.googleapis.com/a/path/proto3_proto.Nested]: <
bunny: "Rabbit of Caerbannog"
>
`, pb)
    want := `line 7: Any message unpacked multiple times, or "type_url" already set`
    if err.Error() != want {
        t.Errorf("incorrect error.\nHave: %v\nWant: %v", err.Error(), want)
    }
}
|
||||||
|
|
||||||
|
func TestUnmarshalAnyMixAndMatch(t *testing.T) {
    pb := &anypb.Any{}
    err := proto.UnmarshalText(`
value: "\n\x05Monty"
[type.googleapis.com/a/path/proto3_proto.Nested]: <
bunny: "Rabbit of Caerbannog"
>
`, pb)
    want := `line 5: Any message unpacked multiple times, or "value" already set`
    if err.Error() != want {
        t.Errorf("incorrect error.\nHave: %v\nWant: %v", err.Error(), want)
    }
}
|
229 vendor/src/github.com/golang/protobuf/proto/clone.go vendored Normal file
@@ -0,0 +1,229 @@
|
||||||
|
// Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
//
|
||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// https://github.com/golang/protobuf
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
// Protocol buffer deep copy and merge.
|
||||||
|
// TODO: RawMessage.
|
||||||
|
|
||||||
|
package proto
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Clone returns a deep copy of a protocol buffer.
func Clone(pb Message) Message {
    in := reflect.ValueOf(pb)
    if in.IsNil() {
        return pb
    }

    out := reflect.New(in.Type().Elem())
    // out is empty so a merge is a deep copy.
    mergeStruct(out.Elem(), in.Elem())
    return out.Interface().(Message)
}
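// Usage sketch (illustrative; not part of the vendored file). Clone deep-copies
// by merging into a freshly allocated message of the same type, so mutating the
// copy never touches the original. The message type is the testdata.MyMessage
// exercised by clone_test.go below:
//
//    orig := &pb.MyMessage{Count: proto.Int32(42)}
//    dup := proto.Clone(orig).(*pb.MyMessage) // Clone returns a proto.Message; assert back
//    *dup.Count = 7                           // orig.Count still holds 42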
|
||||||
|
|
||||||
|
// Merge merges src into dst.
// Required and optional fields that are set in src will be set to that value in dst.
// Elements of repeated fields will be appended.
// Merge panics if src and dst are not the same type, or if dst is nil.
func Merge(dst, src Message) {
    in := reflect.ValueOf(src)
    out := reflect.ValueOf(dst)
    if out.IsNil() {
        panic("proto: nil destination")
    }
    if in.Type() != out.Type() {
        // Explicit test prior to mergeStruct so that mistyped nils will fail
        panic("proto: type mismatch")
    }
    if in.IsNil() {
        // Merging nil into non-nil is a quiet no-op
        return
    }
    mergeStruct(out.Elem(), in.Elem())
}
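// Merge semantics at a glance (illustrative sketch; not part of the vendored
// file). Compare with the mergeTests table in clone_test.go below: set scalar
// fields in src overwrite dst, repeated fields append, and map entries merge
// per key.
//
//    dst := &pb.MyMessage{Name: proto.String("Dave"), Pet: []string{"bunny"}}
//    src := &pb.MyMessage{Count: proto.Int32(42), Pet: []string{"horsey"}}
//    proto.Merge(dst, src)
//    // dst: Count=42, Name="Dave", Pet=["bunny", "horsey"]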
|
||||||
|
|
||||||
|
func mergeStruct(out, in reflect.Value) {
|
||||||
|
sprop := GetProperties(in.Type())
|
||||||
|
for i := 0; i < in.NumField(); i++ {
|
||||||
|
f := in.Type().Field(i)
|
||||||
|
if strings.HasPrefix(f.Name, "XXX_") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
|
||||||
|
}
|
||||||
|
|
||||||
|
if emIn, ok := extendable(in.Addr().Interface()); ok {
|
||||||
|
emOut, _ := extendable(out.Addr().Interface())
|
||||||
|
mIn, muIn := emIn.extensionsRead()
|
||||||
|
if mIn != nil {
|
||||||
|
mOut := emOut.extensionsWrite()
|
||||||
|
muIn.Lock()
|
||||||
|
mergeExtension(mOut, mIn)
|
||||||
|
muIn.Unlock()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
uf := in.FieldByName("XXX_unrecognized")
|
||||||
|
if !uf.IsValid() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
uin := uf.Bytes()
|
||||||
|
if len(uin) > 0 {
|
||||||
|
out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// mergeAny performs a merge between two values of the same type.
|
||||||
|
// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
|
||||||
|
// prop is set if this is a struct field (it may be nil).
|
||||||
|
func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
|
||||||
|
if in.Type() == protoMessageType {
|
||||||
|
if !in.IsNil() {
|
||||||
|
if out.IsNil() {
|
||||||
|
out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
|
||||||
|
} else {
|
||||||
|
Merge(out.Interface().(Message), in.Interface().(Message))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
switch in.Kind() {
|
||||||
|
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
|
||||||
|
reflect.String, reflect.Uint32, reflect.Uint64:
|
||||||
|
if !viaPtr && isProto3Zero(in) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
out.Set(in)
|
||||||
|
case reflect.Interface:
|
||||||
|
// Probably a oneof field; copy non-nil values.
|
||||||
|
if in.IsNil() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// Allocate destination if it is not set, or set to a different type.
|
||||||
|
// Otherwise we will merge as normal.
|
||||||
|
if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
|
||||||
|
out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
|
||||||
|
}
|
||||||
|
mergeAny(out.Elem(), in.Elem(), false, nil)
|
||||||
|
case reflect.Map:
|
||||||
|
if in.Len() == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if out.IsNil() {
|
||||||
|
out.Set(reflect.MakeMap(in.Type()))
|
||||||
|
}
|
||||||
|
// For maps with value types of *T or []byte we need to deep copy each value.
|
||||||
|
elemKind := in.Type().Elem().Kind()
|
||||||
|
for _, key := range in.MapKeys() {
|
||||||
|
var val reflect.Value
|
||||||
|
switch elemKind {
|
||||||
|
case reflect.Ptr:
|
||||||
|
val = reflect.New(in.Type().Elem().Elem())
|
||||||
|
mergeAny(val, in.MapIndex(key), false, nil)
|
||||||
|
case reflect.Slice:
|
||||||
|
val = in.MapIndex(key)
|
||||||
|
val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
|
||||||
|
default:
|
||||||
|
val = in.MapIndex(key)
|
||||||
|
}
|
||||||
|
out.SetMapIndex(key, val)
|
||||||
|
}
|
||||||
|
case reflect.Ptr:
|
||||||
|
if in.IsNil() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if out.IsNil() {
|
||||||
|
out.Set(reflect.New(in.Elem().Type()))
|
||||||
|
}
|
||||||
|
mergeAny(out.Elem(), in.Elem(), true, nil)
|
||||||
|
case reflect.Slice:
|
||||||
|
if in.IsNil() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if in.Type().Elem().Kind() == reflect.Uint8 {
|
||||||
|
// []byte is a scalar bytes field, not a repeated field.
|
||||||
|
|
||||||
|
// Edge case: if this is in a proto3 message, a zero length
|
||||||
|
// bytes field is considered the zero value, and should not
|
||||||
|
// be merged.
|
||||||
|
if prop != nil && prop.proto3 && in.Len() == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make a deep copy.
|
||||||
|
// Append to []byte{} instead of []byte(nil) so that we never end up
|
||||||
|
// with a nil result.
|
||||||
|
out.SetBytes(append([]byte{}, in.Bytes()...))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
n := in.Len()
|
||||||
|
if out.IsNil() {
|
||||||
|
out.Set(reflect.MakeSlice(in.Type(), 0, n))
|
||||||
|
}
|
||||||
|
switch in.Type().Elem().Kind() {
|
||||||
|
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
|
||||||
|
reflect.String, reflect.Uint32, reflect.Uint64:
|
||||||
|
out.Set(reflect.AppendSlice(out, in))
|
||||||
|
default:
|
||||||
|
for i := 0; i < n; i++ {
|
||||||
|
x := reflect.Indirect(reflect.New(in.Type().Elem()))
|
||||||
|
mergeAny(x, in.Index(i), false, nil)
|
||||||
|
out.Set(reflect.Append(out, x))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case reflect.Struct:
|
||||||
|
mergeStruct(out, in)
|
||||||
|
default:
|
||||||
|
// unknown type, so not a protocol buffer
|
||||||
|
log.Printf("proto: don't know how to copy %v", in)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func mergeExtension(out, in map[int32]Extension) {
|
||||||
|
for extNum, eIn := range in {
|
||||||
|
eOut := Extension{desc: eIn.desc}
|
||||||
|
if eIn.value != nil {
|
||||||
|
v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
|
||||||
|
mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
|
||||||
|
eOut.value = v.Interface()
|
||||||
|
}
|
||||||
|
if eIn.enc != nil {
|
||||||
|
eOut.enc = make([]byte, len(eIn.enc))
|
||||||
|
copy(eOut.enc, eIn.enc)
|
||||||
|
}
|
||||||
|
|
||||||
|
out[extNum] = eOut
|
||||||
|
}
|
||||||
|
}
|
300 vendor/src/github.com/golang/protobuf/proto/clone_test.go vendored Normal file
@@ -0,0 +1,300 @@
|
||||||
|
// Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
//
|
||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// https://github.com/golang/protobuf
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
package proto_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/golang/protobuf/proto"
|
||||||
|
|
||||||
|
proto3pb "github.com/golang/protobuf/proto/proto3_proto"
|
||||||
|
pb "github.com/golang/protobuf/proto/testdata"
|
||||||
|
)
|
||||||
|
|
||||||
|
var cloneTestMessage = &pb.MyMessage{
|
||||||
|
Count: proto.Int32(42),
|
||||||
|
Name: proto.String("Dave"),
|
||||||
|
Pet: []string{"bunny", "kitty", "horsey"},
|
||||||
|
Inner: &pb.InnerMessage{
|
||||||
|
Host: proto.String("niles"),
|
||||||
|
Port: proto.Int32(9099),
|
||||||
|
Connected: proto.Bool(true),
|
||||||
|
},
|
||||||
|
Others: []*pb.OtherMessage{
|
||||||
|
{
|
||||||
|
Value: []byte("some bytes"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Somegroup: &pb.MyMessage_SomeGroup{
|
||||||
|
GroupField: proto.Int32(6),
|
||||||
|
},
|
||||||
|
RepBytes: [][]byte{[]byte("sham"), []byte("wow")},
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
ext := &pb.Ext{
|
||||||
|
Data: proto.String("extension"),
|
||||||
|
}
|
||||||
|
if err := proto.SetExtension(cloneTestMessage, pb.E_Ext_More, ext); err != nil {
|
||||||
|
panic("SetExtension: " + err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestClone(t *testing.T) {
|
||||||
|
m := proto.Clone(cloneTestMessage).(*pb.MyMessage)
|
||||||
|
if !proto.Equal(m, cloneTestMessage) {
|
||||||
|
t.Errorf("Clone(%v) = %v", cloneTestMessage, m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify it was a deep copy.
|
||||||
|
*m.Inner.Port++
|
||||||
|
if proto.Equal(m, cloneTestMessage) {
|
||||||
|
t.Error("Mutating clone changed the original")
|
||||||
|
}
|
||||||
|
// Byte fields and repeated fields should be copied.
|
||||||
|
if &m.Pet[0] == &cloneTestMessage.Pet[0] {
|
||||||
|
t.Error("Pet: repeated field not copied")
|
||||||
|
}
|
||||||
|
if &m.Others[0] == &cloneTestMessage.Others[0] {
|
||||||
|
t.Error("Others: repeated field not copied")
|
||||||
|
}
|
||||||
|
if &m.Others[0].Value[0] == &cloneTestMessage.Others[0].Value[0] {
|
||||||
|
t.Error("Others[0].Value: bytes field not copied")
|
||||||
|
}
|
||||||
|
if &m.RepBytes[0] == &cloneTestMessage.RepBytes[0] {
|
||||||
|
t.Error("RepBytes: repeated field not copied")
|
||||||
|
}
|
||||||
|
if &m.RepBytes[0][0] == &cloneTestMessage.RepBytes[0][0] {
|
||||||
|
t.Error("RepBytes[0]: bytes field not copied")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCloneNil(t *testing.T) {
|
||||||
|
var m *pb.MyMessage
|
||||||
|
if c := proto.Clone(m); !proto.Equal(m, c) {
|
||||||
|
t.Errorf("Clone(%v) = %v", m, c)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var mergeTests = []struct {
|
||||||
|
src, dst, want proto.Message
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
src: &pb.MyMessage{
|
||||||
|
Count: proto.Int32(42),
|
||||||
|
},
|
||||||
|
dst: &pb.MyMessage{
|
||||||
|
Name: proto.String("Dave"),
|
||||||
|
},
|
||||||
|
want: &pb.MyMessage{
|
||||||
|
Count: proto.Int32(42),
|
||||||
|
Name: proto.String("Dave"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
src: &pb.MyMessage{
|
||||||
|
Inner: &pb.InnerMessage{
|
||||||
|
Host: proto.String("hey"),
|
||||||
|
Connected: proto.Bool(true),
|
||||||
|
},
|
||||||
|
Pet: []string{"horsey"},
|
||||||
|
Others: []*pb.OtherMessage{
|
||||||
|
{
|
||||||
|
Value: []byte("some bytes"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
dst: &pb.MyMessage{
|
||||||
|
Inner: &pb.InnerMessage{
|
||||||
|
Host: proto.String("niles"),
|
||||||
|
Port: proto.Int32(9099),
|
||||||
|
},
|
||||||
|
Pet: []string{"bunny", "kitty"},
|
||||||
|
Others: []*pb.OtherMessage{
|
||||||
|
{
|
||||||
|
Key: proto.Int64(31415926535),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// Explicitly test a src=nil field
|
||||||
|
Inner: nil,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
want: &pb.MyMessage{
|
||||||
|
Inner: &pb.InnerMessage{
|
||||||
|
Host: proto.String("hey"),
|
||||||
|
Connected: proto.Bool(true),
|
||||||
|
Port: proto.Int32(9099),
|
||||||
|
},
|
||||||
|
Pet: []string{"bunny", "kitty", "horsey"},
|
||||||
|
Others: []*pb.OtherMessage{
|
||||||
|
{
|
||||||
|
Key: proto.Int64(31415926535),
|
||||||
|
},
|
||||||
|
{},
|
||||||
|
{
|
||||||
|
Value: []byte("some bytes"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
src: &pb.MyMessage{
|
||||||
|
RepBytes: [][]byte{[]byte("wow")},
|
||||||
|
},
|
||||||
|
dst: &pb.MyMessage{
|
||||||
|
Somegroup: &pb.MyMessage_SomeGroup{
|
||||||
|
GroupField: proto.Int32(6),
|
||||||
|
},
|
||||||
|
RepBytes: [][]byte{[]byte("sham")},
|
||||||
|
},
|
||||||
|
want: &pb.MyMessage{
|
||||||
|
Somegroup: &pb.MyMessage_SomeGroup{
|
||||||
|
GroupField: proto.Int32(6),
|
||||||
|
},
|
||||||
|
RepBytes: [][]byte{[]byte("sham"), []byte("wow")},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
// Check that a scalar bytes field replaces rather than appends.
|
||||||
|
{
|
||||||
|
src: &pb.OtherMessage{Value: []byte("foo")},
|
||||||
|
dst: &pb.OtherMessage{Value: []byte("bar")},
|
||||||
|
want: &pb.OtherMessage{Value: []byte("foo")},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
src: &pb.MessageWithMap{
|
||||||
|
NameMapping: map[int32]string{6: "Nigel"},
|
||||||
|
MsgMapping: map[int64]*pb.FloatingPoint{
|
||||||
|
0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)},
|
||||||
|
0x4002: &pb.FloatingPoint{
|
||||||
|
F: proto.Float64(2.0),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
ByteMapping: map[bool][]byte{true: []byte("wowsa")},
|
||||||
|
},
|
||||||
|
dst: &pb.MessageWithMap{
|
||||||
|
NameMapping: map[int32]string{
|
||||||
|
6: "Bruce", // should be overwritten
|
||||||
|
7: "Andrew",
|
||||||
|
},
|
||||||
|
MsgMapping: map[int64]*pb.FloatingPoint{
|
||||||
|
0x4002: &pb.FloatingPoint{
|
||||||
|
F: proto.Float64(3.0),
|
||||||
|
Exact: proto.Bool(true),
|
||||||
|
}, // the entire message should be overwritten
|
||||||
|
},
|
||||||
|
},
|
||||||
|
want: &pb.MessageWithMap{
|
||||||
|
NameMapping: map[int32]string{
|
||||||
|
6: "Nigel",
|
||||||
|
7: "Andrew",
|
||||||
|
},
|
||||||
|
MsgMapping: map[int64]*pb.FloatingPoint{
|
||||||
|
0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)},
|
||||||
|
0x4002: &pb.FloatingPoint{
|
||||||
|
F: proto.Float64(2.0),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
ByteMapping: map[bool][]byte{true: []byte("wowsa")},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
// proto3 shouldn't merge zero values,
|
||||||
|
// in the same way that proto2 shouldn't merge nils.
|
||||||
|
{
|
||||||
|
src: &proto3pb.Message{
|
||||||
|
Name: "Aaron",
|
||||||
|
Data: []byte(""), // zero value, but not nil
|
||||||
|
},
|
||||||
|
dst: &proto3pb.Message{
|
||||||
|
HeightInCm: 176,
|
||||||
|
Data: []byte("texas!"),
|
||||||
|
},
|
||||||
|
want: &proto3pb.Message{
|
||||||
|
Name: "Aaron",
|
||||||
|
HeightInCm: 176,
|
||||||
|
Data: []byte("texas!"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
// Oneof fields should merge by assignment.
|
||||||
|
{
|
||||||
|
src: &pb.Communique{
|
||||||
|
Union: &pb.Communique_Number{41},
|
||||||
|
},
|
||||||
|
dst: &pb.Communique{
|
||||||
|
Union: &pb.Communique_Name{"Bobby Tables"},
|
||||||
|
},
|
||||||
|
want: &pb.Communique{
|
||||||
|
Union: &pb.Communique_Number{41},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
// Oneof nil is the same as not set.
|
||||||
|
{
|
||||||
|
src: &pb.Communique{},
|
||||||
|
dst: &pb.Communique{
|
||||||
|
Union: &pb.Communique_Name{"Bobby Tables"},
|
||||||
|
},
|
||||||
|
want: &pb.Communique{
|
||||||
|
Union: &pb.Communique_Name{"Bobby Tables"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
src: &proto3pb.Message{
|
||||||
|
Terrain: map[string]*proto3pb.Nested{
|
||||||
|
"kay_a": &proto3pb.Nested{Cute: true}, // replace
|
||||||
|
"kay_b": &proto3pb.Nested{Bunny: "rabbit"}, // insert
|
||||||
|
},
|
||||||
|
},
|
||||||
|
dst: &proto3pb.Message{
|
||||||
|
Terrain: map[string]*proto3pb.Nested{
|
||||||
|
"kay_a": &proto3pb.Nested{Bunny: "lost"}, // replaced
|
||||||
|
"kay_c": &proto3pb.Nested{Bunny: "bunny"}, // keep
|
||||||
|
},
|
||||||
|
},
|
||||||
|
want: &proto3pb.Message{
|
||||||
|
Terrain: map[string]*proto3pb.Nested{
|
||||||
|
"kay_a": &proto3pb.Nested{Cute: true},
|
||||||
|
"kay_b": &proto3pb.Nested{Bunny: "rabbit"},
|
||||||
|
"kay_c": &proto3pb.Nested{Bunny: "bunny"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMerge(t *testing.T) {
|
||||||
|
for _, m := range mergeTests {
|
||||||
|
got := proto.Clone(m.dst)
|
||||||
|
proto.Merge(got, m.src)
|
||||||
|
if !proto.Equal(got, m.want) {
|
||||||
|
t.Errorf("Merge(%v, %v)\n got %v\nwant %v\n", m.dst, m.src, got, m.want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
970 vendor/src/github.com/golang/protobuf/proto/decode.go vendored Normal file
@@ -0,0 +1,970 @@
|
||||||
|
// Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
//
|
||||||
|
// Copyright 2010 The Go Authors. All rights reserved.
|
||||||
|
// https://github.com/golang/protobuf
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
package proto
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Routines for decoding protocol buffer data to construct in-memory representations.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"reflect"
|
||||||
|
)
|
||||||
|
|
||||||
|
// errOverflow is returned when an integer is too large to be represented.
|
||||||
|
var errOverflow = errors.New("proto: integer overflow")
|
||||||
|
|
||||||
|
// ErrInternalBadWireType is returned by generated code when an incorrect
|
||||||
|
// wire type is encountered. It does not get returned to user code.
|
||||||
|
var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
|
||||||
|
|
||||||
|
// The fundamental decoders that interpret bytes on the wire.
|
||||||
|
// Those that take integer types all return uint64 and are
|
||||||
|
// therefore of type valueDecoder.
|
||||||
|
|
||||||
|
// DecodeVarint reads a varint-encoded integer from the slice.
// It returns the integer and the number of bytes consumed, or
// zero if there is not enough.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func DecodeVarint(buf []byte) (x uint64, n int) {
    for shift := uint(0); shift < 64; shift += 7 {
        if n >= len(buf) {
            return 0, 0
        }
        b := uint64(buf[n])
        n++
        x |= (b & 0x7F) << shift
        if (b & 0x80) == 0 {
            return x, n
        }
    }

    // The number is too large to represent in a 64-bit value.
    return 0, 0
}
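// Worked example (illustrative; not part of the vendored file). Varints are
// little-endian, 7 bits per byte, with the high bit of each byte acting as a
// continuation flag, so the bytes 0x96 0x01 decode to 0x16 + (0x01 << 7) = 150:
//
//    x, n := proto.DecodeVarint([]byte{0x96, 0x01})
//    // x == 150, n == 2; n == 0 would indicate truncated input.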
|
||||||
|
|
||||||
|
func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
|
||||||
|
i := p.index
|
||||||
|
l := len(p.buf)
|
||||||
|
|
||||||
|
for shift := uint(0); shift < 64; shift += 7 {
|
||||||
|
if i >= l {
|
||||||
|
err = io.ErrUnexpectedEOF
|
||||||
|
return
|
||||||
|
}
|
||||||
|
b := p.buf[i]
|
||||||
|
i++
|
||||||
|
x |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
p.index = i
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// The number is too large to represent in a 64-bit value.
|
||||||
|
err = errOverflow
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeVarint reads a varint-encoded integer from the Buffer.
|
||||||
|
// This is the format for the
|
||||||
|
// int32, int64, uint32, uint64, bool, and enum
|
||||||
|
// protocol buffer types.
|
||||||
|
func (p *Buffer) DecodeVarint() (x uint64, err error) {
|
||||||
|
i := p.index
|
||||||
|
buf := p.buf
|
||||||
|
|
||||||
|
if i >= len(buf) {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
} else if buf[i] < 0x80 {
|
||||||
|
p.index++
|
||||||
|
return uint64(buf[i]), nil
|
||||||
|
} else if len(buf)-i < 10 {
|
||||||
|
return p.decodeVarintSlow()
|
||||||
|
}
|
||||||
|
|
||||||
|
var b uint64
|
||||||
|
// we already checked the first byte
|
||||||
|
x = uint64(buf[i]) - 0x80
|
||||||
|
i++
|
||||||
|
|
||||||
|
b = uint64(buf[i])
|
||||||
|
i++
|
||||||
|
x += b << 7
|
||||||
|
if b&0x80 == 0 {
|
||||||
|
goto done
|
||||||
|
}
|
||||||
|
x -= 0x80 << 7
|
||||||
|
|
||||||
|
b = uint64(buf[i])
|
||||||
|
i++
|
||||||
|
x += b << 14
|
||||||
|
if b&0x80 == 0 {
|
||||||
|
goto done
|
||||||
|
}
|
||||||
|
x -= 0x80 << 14
|
||||||
|
|
||||||
|
b = uint64(buf[i])
|
||||||
|
i++
|
||||||
|
x += b << 21
|
||||||
|
if b&0x80 == 0 {
|
||||||
|
goto done
|
||||||
|
}
|
||||||
|
x -= 0x80 << 21
|
||||||
|
|
||||||
|
b = uint64(buf[i])
|
||||||
|
i++
|
||||||
|
x += b << 28
|
||||||
|
if b&0x80 == 0 {
|
||||||
|
goto done
|
||||||
|
}
|
||||||
|
x -= 0x80 << 28
|
||||||
|
|
||||||
|
b = uint64(buf[i])
|
||||||
|
i++
|
||||||
|
x += b << 35
|
||||||
|
if b&0x80 == 0 {
|
||||||
|
goto done
|
||||||
|
}
|
||||||
|
x -= 0x80 << 35
|
||||||
|
|
||||||
|
b = uint64(buf[i])
|
||||||
|
i++
|
||||||
|
x += b << 42
|
||||||
|
if b&0x80 == 0 {
|
||||||
|
goto done
|
||||||
|
}
|
||||||
|
x -= 0x80 << 42
|
||||||
|
|
||||||
|
b = uint64(buf[i])
|
||||||
|
i++
|
||||||
|
x += b << 49
|
||||||
|
if b&0x80 == 0 {
|
||||||
|
goto done
|
||||||
|
}
|
||||||
|
x -= 0x80 << 49
|
||||||
|
|
||||||
|
b = uint64(buf[i])
|
||||||
|
i++
|
||||||
|
x += b << 56
|
||||||
|
if b&0x80 == 0 {
|
||||||
|
goto done
|
||||||
|
}
|
||||||
|
x -= 0x80 << 56
|
||||||
|
|
||||||
|
b = uint64(buf[i])
|
||||||
|
i++
|
||||||
|
x += b << 63
|
||||||
|
if b&0x80 == 0 {
|
||||||
|
goto done
|
||||||
|
}
|
||||||
|
// x -= 0x80 << 63 // Always zero.
|
||||||
|
|
||||||
|
return 0, errOverflow
|
||||||
|
|
||||||
|
done:
|
||||||
|
p.index = i
|
||||||
|
return x, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeFixed64 reads a 64-bit integer from the Buffer.
|
||||||
|
// This is the format for the
|
||||||
|
// fixed64, sfixed64, and double protocol buffer types.
|
||||||
|
func (p *Buffer) DecodeFixed64() (x uint64, err error) {
|
||||||
|
// x, err already 0
|
||||||
|
i := p.index + 8
|
||||||
|
if i < 0 || i > len(p.buf) {
|
||||||
|
err = io.ErrUnexpectedEOF
|
||||||
|
return
|
||||||
|
}
|
||||||
|
p.index = i
|
||||||
|
|
||||||
|
x = uint64(p.buf[i-8])
|
||||||
|
x |= uint64(p.buf[i-7]) << 8
|
||||||
|
x |= uint64(p.buf[i-6]) << 16
|
||||||
|
x |= uint64(p.buf[i-5]) << 24
|
||||||
|
x |= uint64(p.buf[i-4]) << 32
|
||||||
|
x |= uint64(p.buf[i-3]) << 40
|
||||||
|
x |= uint64(p.buf[i-2]) << 48
|
||||||
|
x |= uint64(p.buf[i-1]) << 56
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeFixed32 reads a 32-bit integer from the Buffer.
|
||||||
|
// This is the format for the
|
||||||
|
// fixed32, sfixed32, and float protocol buffer types.
|
||||||
|
func (p *Buffer) DecodeFixed32() (x uint64, err error) {
|
||||||
|
// x, err already 0
|
||||||
|
i := p.index + 4
|
||||||
|
if i < 0 || i > len(p.buf) {
|
||||||
|
err = io.ErrUnexpectedEOF
|
||||||
|
return
|
||||||
|
}
|
||||||
|
p.index = i
|
||||||
|
|
||||||
|
x = uint64(p.buf[i-4])
|
||||||
|
x |= uint64(p.buf[i-3]) << 8
|
||||||
|
x |= uint64(p.buf[i-2]) << 16
|
||||||
|
x |= uint64(p.buf[i-1]) << 24
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
// from the Buffer.
// This is the format used for the sint64 protocol buffer type.
func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
    x, err = p.DecodeVarint()
    if err != nil {
        return
    }
    x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
    return
}
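// Zigzag refresher (illustrative; not part of the vendored file). sint64 values
// are varint-encoded after the transform n -> (n << 1) ^ (n >> 63), which maps
// 0, -1, 1, -2, 2, ... to 0, 1, 2, 3, 4, ... The expression above inverts that
// transform; for example a decoded varint of 3 becomes int64(-2):
//
//    u := uint64(3)
//    v := (u >> 1) ^ uint64((int64(u&1)<<63)>>63) // int64(v) == -2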
|
||||||
|
|
||||||
|
// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
|
||||||
|
// from the Buffer.
|
||||||
|
// This is the format used for the sint32 protocol buffer type.
|
||||||
|
func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
|
||||||
|
x, err = p.DecodeVarint()
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// These are not ValueDecoders: they produce an array of bytes or a string.
|
||||||
|
// bytes, embedded messages
|
||||||
|
|
||||||
|
// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
|
||||||
|
// This is the format used for the bytes protocol buffer
|
||||||
|
// type and for embedded messages.
|
||||||
|
func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
|
||||||
|
n, err := p.DecodeVarint()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
nb := int(n)
|
||||||
|
if nb < 0 {
|
||||||
|
return nil, fmt.Errorf("proto: bad byte length %d", nb)
|
||||||
|
}
|
||||||
|
end := p.index + nb
|
||||||
|
if end < p.index || end > len(p.buf) {
|
||||||
|
return nil, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
|
||||||
|
if !alloc {
|
||||||
|
// todo: check if can get more uses of alloc=false
|
||||||
|
buf = p.buf[p.index:end]
|
||||||
|
p.index += nb
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
buf = make([]byte, nb)
|
||||||
|
copy(buf, p.buf[p.index:])
|
||||||
|
p.index += nb
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeStringBytes reads an encoded string from the Buffer.
|
||||||
|
// This is the format used for the proto2 string type.
|
||||||
|
func (p *Buffer) DecodeStringBytes() (s string, err error) {
|
||||||
|
buf, err := p.DecodeRawBytes(false)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return string(buf), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
|
||||||
|
// If the protocol buffer has extensions, and the field matches, add it as an extension.
|
||||||
|
// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
|
||||||
|
func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
|
||||||
|
oi := o.index
|
||||||
|
|
||||||
|
err := o.skip(t, tag, wire)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !unrecField.IsValid() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
ptr := structPointer_Bytes(base, unrecField)
|
||||||
|
|
||||||
|
// Add the skipped field to struct field
|
||||||
|
obuf := o.buf
|
||||||
|
|
||||||
|
o.buf = *ptr
|
||||||
|
o.EncodeVarint(uint64(tag<<3 | wire))
|
||||||
|
*ptr = append(o.buf, obuf[oi:o.index]...)
|
||||||
|
|
||||||
|
o.buf = obuf
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
|
||||||
|
func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
|
||||||
|
|
||||||
|
var u uint64
|
||||||
|
var err error
|
||||||
|
|
||||||
|
switch wire {
|
||||||
|
case WireVarint:
|
||||||
|
_, err = o.DecodeVarint()
|
||||||
|
case WireFixed64:
|
||||||
|
_, err = o.DecodeFixed64()
|
||||||
|
case WireBytes:
|
||||||
|
_, err = o.DecodeRawBytes(false)
|
||||||
|
case WireFixed32:
|
||||||
|
_, err = o.DecodeFixed32()
|
||||||
|
case WireStartGroup:
|
||||||
|
for {
|
||||||
|
u, err = o.DecodeVarint()
|
||||||
|
if err != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
fwire := int(u & 0x7)
|
||||||
|
if fwire == WireEndGroup {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
ftag := int(u >> 3)
|
||||||
|
err = o.skip(t, ftag, fwire)
|
||||||
|
if err != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unmarshaler is the interface representing objects that can
|
||||||
|
// unmarshal themselves. The method should reset the receiver before
|
||||||
|
// decoding starts. The argument points to data that may be
|
||||||
|
// overwritten, so implementations should not keep references to the
|
||||||
|
// buffer.
|
||||||
|
type Unmarshaler interface {
|
||||||
|
Unmarshal([]byte) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unmarshal parses the protocol buffer representation in buf and places the
// decoded result in pb. If the struct underlying pb does not match
// the data in buf, the results can be unpredictable.
//
// Unmarshal resets pb before starting to unmarshal, so any
// existing data in pb is always removed. Use UnmarshalMerge
// to preserve and append to existing data.
func Unmarshal(buf []byte, pb Message) error {
    pb.Reset()
    return UnmarshalMerge(buf, pb)
}
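// Typical round trip (illustrative sketch; not part of the vendored file),
// using the testdata message type exercised elsewhere in this package:
//
//    buf, err := proto.Marshal(&pb.MyMessage{Count: proto.Int32(7)})
//    if err != nil {
//        return err
//    }
//    out := new(pb.MyMessage)
//    if err := proto.Unmarshal(buf, out); err != nil {
//        return err
//    }
//    // out is reset before decoding; use proto.UnmarshalMerge to fold the
//    // buffer into out's existing contents instead.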
|
||||||
|
|
||||||
|
// UnmarshalMerge parses the protocol buffer representation in buf and
// writes the decoded result to pb. If the struct underlying pb does not match
// the data in buf, the results can be unpredictable.
//
// UnmarshalMerge merges into existing data in pb.
// Most code should use Unmarshal instead.
func UnmarshalMerge(buf []byte, pb Message) error {
    // If the object can unmarshal itself, let it.
    if u, ok := pb.(Unmarshaler); ok {
        return u.Unmarshal(buf)
    }
    return NewBuffer(buf).Unmarshal(pb)
}
|
||||||
|
|
||||||
|
// DecodeMessage reads a count-delimited message from the Buffer.
|
||||||
|
func (p *Buffer) DecodeMessage(pb Message) error {
|
||||||
|
enc, err := p.DecodeRawBytes(false)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return NewBuffer(enc).Unmarshal(pb)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeGroup reads a tag-delimited group from the Buffer.
|
||||||
|
func (p *Buffer) DecodeGroup(pb Message) error {
|
||||||
|
typ, base, err := getbase(pb)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unmarshal parses the protocol buffer representation in the
|
||||||
|
// Buffer and places the decoded result in pb. If the struct
|
||||||
|
// underlying pb does not match the data in the buffer, the results can be
|
||||||
|
// unpredictable.
|
||||||
|
//
|
||||||
|
// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
|
||||||
|
func (p *Buffer) Unmarshal(pb Message) error {
|
||||||
|
// If the object can unmarshal itself, let it.
|
||||||
|
if u, ok := pb.(Unmarshaler); ok {
|
||||||
|
err := u.Unmarshal(p.buf[p.index:])
|
||||||
|
p.index = len(p.buf)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
typ, base, err := getbase(pb)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
|
||||||
|
|
||||||
|
if collectStats {
|
||||||
|
stats.Decode++
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// unmarshalType does the work of unmarshaling a structure.
|
||||||
|
func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
|
||||||
|
var state errorState
|
||||||
|
required, reqFields := prop.reqCount, uint64(0)
|
||||||
|
|
||||||
|
var err error
|
||||||
|
for err == nil && o.index < len(o.buf) {
|
||||||
|
oi := o.index
|
||||||
|
var u uint64
|
||||||
|
u, err = o.DecodeVarint()
|
||||||
|
if err != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
wire := int(u & 0x7)
|
||||||
|
if wire == WireEndGroup {
|
||||||
|
if is_group {
|
||||||
|
if required > 0 {
|
||||||
|
// Not enough information to determine the exact field.
|
||||||
|
// (See below.)
|
||||||
|
return &RequiredNotSetError{"{Unknown}"}
|
||||||
|
}
|
||||||
|
return nil // input is satisfied
|
||||||
|
}
|
||||||
|
return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
|
||||||
|
}
|
||||||
|
tag := int(u >> 3)
|
||||||
|
if tag <= 0 {
|
||||||
|
return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
|
||||||
|
}
|
||||||
|
fieldnum, ok := prop.decoderTags.get(tag)
|
||||||
|
if !ok {
|
||||||
|
// Maybe it's an extension?
|
||||||
|
if prop.extendable {
|
||||||
|
if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) {
|
||||||
|
if err = o.skip(st, tag, wire); err == nil {
|
||||||
|
extmap := e.extensionsWrite()
|
||||||
|
ext := extmap[int32(tag)] // may be missing
|
||||||
|
ext.enc = append(ext.enc, o.buf[oi:o.index]...)
|
||||||
|
extmap[int32(tag)] = ext
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Maybe it's a oneof?
|
||||||
|
if prop.oneofUnmarshaler != nil {
|
||||||
|
m := structPointer_Interface(base, st).(Message)
|
||||||
|
// First return value indicates whether tag is a oneof field.
|
||||||
|
ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
|
||||||
|
if err == ErrInternalBadWireType {
|
||||||
|
// Map the error to something more descriptive.
|
||||||
|
// Do the formatting here to save generated code space.
|
||||||
|
err = fmt.Errorf("bad wiretype for oneof field in %T", m)
|
||||||
|
}
|
||||||
|
if ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
p := prop.Prop[fieldnum]
|
||||||
|
|
||||||
|
if p.dec == nil {
|
||||||
|
fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
dec := p.dec
|
||||||
|
if wire != WireStartGroup && wire != p.WireType {
|
||||||
|
if wire == WireBytes && p.packedDec != nil {
|
||||||
|
// a packable field
|
||||||
|
dec = p.packedDec
|
||||||
|
} else {
|
||||||
|
err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
decErr := dec(o, p, base)
|
||||||
|
if decErr != nil && !state.shouldContinue(decErr, p) {
|
||||||
|
err = decErr
|
||||||
|
}
|
||||||
|
if err == nil && p.Required {
|
||||||
|
// Successfully decoded a required field.
|
||||||
|
if tag <= 64 {
|
||||||
|
// use bitmap for fields 1-64 to catch field reuse.
|
||||||
|
var mask uint64 = 1 << uint64(tag-1)
|
||||||
|
if reqFields&mask == 0 {
|
||||||
|
// new required field
|
||||||
|
reqFields |= mask
|
||||||
|
required--
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// This is imprecise. It can be fooled by a required field
|
||||||
|
// with a tag > 64 that is encoded twice; that's very rare.
|
||||||
|
// A fully correct implementation would require allocating
|
||||||
|
// a data structure, which we would like to avoid.
|
||||||
|
required--
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
if is_group {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
if state.err != nil {
|
||||||
|
return state.err
|
||||||
|
}
|
||||||
|
if required > 0 {
|
||||||
|
// Not enough information to determine the exact field. If we use extra
|
||||||
|
// CPU, we could determine the field only if the missing required field
|
||||||
|
// has a tag <= 64 and we check reqFields.
|
||||||
|
return &RequiredNotSetError{"{Unknown}"}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Individual type decoders
|
||||||
|
// For each,
|
||||||
|
// u is the decoded value,
|
||||||
|
// v is a pointer to the field (pointer) in the struct
|
||||||
|
|
||||||
|
// Sizes of the pools to allocate inside the Buffer.
|
||||||
|
// The goal is modest amortization and allocation
|
||||||
|
// on at least 16-byte boundaries.
|
||||||
|
const (
|
||||||
|
boolPoolSize = 16
|
||||||
|
uint32PoolSize = 8
|
||||||
|
uint64PoolSize = 4
|
||||||
|
)
|
||||||
|
|
||||||
|
// Decode a bool.
|
||||||
|
func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
|
||||||
|
u, err := p.valDec(o)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if len(o.bools) == 0 {
|
||||||
|
o.bools = make([]bool, boolPoolSize)
|
||||||
|
}
|
||||||
|
o.bools[0] = u != 0
|
||||||
|
*structPointer_Bool(base, p.field) = &o.bools[0]
|
||||||
|
o.bools = o.bools[1:]
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
|
||||||
|
u, err := p.valDec(o)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
*structPointer_BoolVal(base, p.field) = u != 0
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode an int32.
|
||||||
|
func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
|
||||||
|
u, err := p.valDec(o)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
|
||||||
|
u, err := p.valDec(o)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode an int64.
|
||||||
|
func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
|
||||||
|
u, err := p.valDec(o)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
word64_Set(structPointer_Word64(base, p.field), o, u)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
|
||||||
|
u, err := p.valDec(o)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode a string.
|
||||||
|
func (o *Buffer) dec_string(p *Properties, base structPointer) error {
|
||||||
|
s, err := o.DecodeStringBytes()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
*structPointer_String(base, p.field) = &s
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
|
||||||
|
s, err := o.DecodeStringBytes()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
*structPointer_StringVal(base, p.field) = s
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode a slice of bytes ([]byte).
|
||||||
|
func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
|
||||||
|
b, err := o.DecodeRawBytes(true)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
*structPointer_Bytes(base, p.field) = b
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode a slice of bools ([]bool).
|
||||||
|
func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
|
||||||
|
u, err := p.valDec(o)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
v := structPointer_BoolSlice(base, p.field)
|
||||||
|
*v = append(*v, u != 0)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode a slice of bools ([]bool) in packed format.
|
||||||
|
func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
|
||||||
|
v := structPointer_BoolSlice(base, p.field)
|
||||||
|
|
||||||
|
nn, err := o.DecodeVarint()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
nb := int(nn) // number of bytes of encoded bools
|
||||||
|
fin := o.index + nb
|
||||||
|
if fin < o.index {
|
||||||
|
return errOverflow
|
||||||
|
}
|
||||||
|
|
||||||
|
y := *v
|
||||||
|
for o.index < fin {
|
||||||
|
u, err := p.valDec(o)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
y = append(y, u != 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
*v = y
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode a slice of int32s ([]int32).
|
||||||
|
func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
|
||||||
|
u, err := p.valDec(o)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
structPointer_Word32Slice(base, p.field).Append(uint32(u))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode a slice of int32s ([]int32) in packed format.
|
||||||
|
func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
|
||||||
|
v := structPointer_Word32Slice(base, p.field)
|
||||||
|
|
||||||
|
nn, err := o.DecodeVarint()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
nb := int(nn) // number of bytes of encoded int32s
|
||||||
|
|
||||||
|
fin := o.index + nb
|
||||||
|
if fin < o.index {
|
||||||
|
return errOverflow
|
||||||
|
}
|
||||||
|
for o.index < fin {
|
||||||
|
u, err := p.valDec(o)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
v.Append(uint32(u))
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode a slice of int64s ([]int64).
|
||||||
|
func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
|
||||||
|
u, err := p.valDec(o)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
structPointer_Word64Slice(base, p.field).Append(u)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode a slice of int64s ([]int64) in packed format.
|
||||||
|
func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
|
||||||
|
v := structPointer_Word64Slice(base, p.field)
|
||||||
|
|
||||||
|
nn, err := o.DecodeVarint()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
nb := int(nn) // number of bytes of encoded int64s
|
||||||
|
|
||||||
|
fin := o.index + nb
|
||||||
|
if fin < o.index {
|
||||||
|
return errOverflow
|
||||||
|
}
|
||||||
|
for o.index < fin {
|
||||||
|
u, err := p.valDec(o)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
v.Append(u)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode a slice of strings ([]string).
|
||||||
|
func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
|
||||||
|
s, err := o.DecodeStringBytes()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
v := structPointer_StringSlice(base, p.field)
|
||||||
|
*v = append(*v, s)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode a slice of slice of bytes ([][]byte).
|
||||||
|
func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
|
||||||
|
b, err := o.DecodeRawBytes(true)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
v := structPointer_BytesSlice(base, p.field)
|
||||||
|
*v = append(*v, b)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode a map field.
|
||||||
|
func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
|
||||||
|
raw, err := o.DecodeRawBytes(false)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
oi := o.index // index at the end of this map entry
|
||||||
|
o.index -= len(raw) // move buffer back to start of map entry
|
||||||
|
|
||||||
|
mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
|
||||||
|
if mptr.Elem().IsNil() {
|
||||||
|
mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
|
||||||
|
}
|
||||||
|
v := mptr.Elem() // map[K]V
|
||||||
|
|
||||||
|
// Prepare addressable doubly-indirect placeholders for the key and value types.
|
||||||
|
// See enc_new_map for why.
|
||||||
|
keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
|
||||||
|
keybase := toStructPointer(keyptr.Addr()) // **K
|
||||||
|
|
||||||
|
var valbase structPointer
|
||||||
|
var valptr reflect.Value
|
||||||
|
switch p.mtype.Elem().Kind() {
|
||||||
|
case reflect.Slice:
|
||||||
|
// []byte
|
||||||
|
var dummy []byte
|
||||||
|
valptr = reflect.ValueOf(&dummy) // *[]byte
|
||||||
|
valbase = toStructPointer(valptr) // *[]byte
|
||||||
|
case reflect.Ptr:
|
||||||
|
// message; valptr is **Msg; need to allocate the intermediate pointer
|
||||||
|
valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
|
||||||
|
valptr.Set(reflect.New(valptr.Type().Elem()))
|
||||||
|
valbase = toStructPointer(valptr)
|
||||||
|
default:
|
||||||
|
// everything else
|
||||||
|
valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
|
||||||
|
valbase = toStructPointer(valptr.Addr()) // **V
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode.
|
||||||
|
// This parses a restricted wire format, namely the encoding of a message
|
||||||
|
// with two fields. See enc_new_map for the format.
|
||||||
|
for o.index < oi {
|
||||||
|
// tagcode for key and value properties are always a single byte
|
||||||
|
// because they have tags 1 and 2.
|
||||||
|
tagcode := o.buf[o.index]
|
||||||
|
o.index++
|
||||||
|
switch tagcode {
|
||||||
|
case p.mkeyprop.tagcode[0]:
|
||||||
|
if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
case p.mvalprop.tagcode[0]:
|
||||||
|
if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
// TODO: Should we silently skip this instead?
|
||||||
|
return fmt.Errorf("proto: bad map data tag %d", raw[0])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
keyelem, valelem := keyptr.Elem(), valptr.Elem()
|
||||||
|
if !keyelem.IsValid() {
|
||||||
|
keyelem = reflect.Zero(p.mtype.Key())
|
||||||
|
}
|
||||||
|
if !valelem.IsValid() {
|
||||||
|
valelem = reflect.Zero(p.mtype.Elem())
|
||||||
|
}
|
||||||
|
|
||||||
|
v.SetMapIndex(keyelem, valelem)
|
||||||
|
return nil
|
||||||
|
}
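// Wire-format note (illustrative; not part of the vendored file). Each map
// entry arrives as an embedded message whose field 1 is the key and field 2 is
// the value, which is why the loop above only ever sees the two single-byte
// tagcodes from p.mkeyprop and p.mvalprop. For a map<int32, string> entry
// {6: "Nigel"}, the raw bytes would be roughly:
//
//    0x08 0x06                     // field 1 (key), wire type 0 (varint), value 6
//    0x12 0x05 'N' 'i' 'g' 'e' 'l' // field 2 (value), wire type 2, length 5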
|
||||||
|
|
||||||
|
// Decode a group.
|
||||||
|
func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
|
||||||
|
bas := structPointer_GetStructPointer(base, p.field)
|
||||||
|
if structPointer_IsNil(bas) {
|
||||||
|
// allocate new nested message
|
||||||
|
bas = toStructPointer(reflect.New(p.stype))
|
||||||
|
structPointer_SetStructPointer(base, p.field, bas)
|
||||||
|
}
|
||||||
|
return o.unmarshalType(p.stype, p.sprop, true, bas)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode an embedded message.
|
||||||
|
func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
|
||||||
|
raw, e := o.DecodeRawBytes(false)
|
||||||
|
if e != nil {
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
bas := structPointer_GetStructPointer(base, p.field)
|
||||||
|
if structPointer_IsNil(bas) {
|
||||||
|
// allocate new nested message
|
||||||
|
bas = toStructPointer(reflect.New(p.stype))
|
||||||
|
structPointer_SetStructPointer(base, p.field, bas)
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the object can unmarshal itself, let it.
|
||||||
|
if p.isUnmarshaler {
|
||||||
|
iv := structPointer_Interface(bas, p.stype)
|
||||||
|
return iv.(Unmarshaler).Unmarshal(raw)
|
||||||
|
}
|
||||||
|
|
||||||
|
obuf := o.buf
|
||||||
|
oi := o.index
|
||||||
|
o.buf = raw
|
||||||
|
o.index = 0
|
||||||
|
|
||||||
|
err = o.unmarshalType(p.stype, p.sprop, false, bas)
|
||||||
|
o.buf = obuf
|
||||||
|
o.index = oi
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode a slice of embedded messages.
|
||||||
|
func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
|
||||||
|
return o.dec_slice_struct(p, false, base)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode a slice of embedded groups.
|
||||||
|
func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
|
||||||
|
return o.dec_slice_struct(p, true, base)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode a slice of structs ([]*struct).
|
||||||
|
func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
|
||||||
|
v := reflect.New(p.stype)
|
||||||
|
bas := toStructPointer(v)
|
||||||
|
structPointer_StructPointerSlice(base, p.field).Append(bas)
|
||||||
|
|
||||||
|
if is_group {
|
||||||
|
err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
raw, err := o.DecodeRawBytes(false)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the object can unmarshal itself, let it.
|
||||||
|
if p.isUnmarshaler {
|
||||||
|
iv := v.Interface()
|
||||||
|
return iv.(Unmarshaler).Unmarshal(raw)
|
||||||
|
}
|
||||||
|
|
||||||
|
obuf := o.buf
|
||||||
|
oi := o.index
|
||||||
|
o.buf = raw
|
||||||
|
o.index = 0
|
||||||
|
|
||||||
|
err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
|
||||||
|
|
||||||
|
o.buf = obuf
|
||||||
|
o.index = oi
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
256 vendor/src/github.com/golang/protobuf/proto/decode_test.go vendored Normal file
@@ -0,0 +1,256 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package proto_test

import (
	"fmt"
	"testing"

	"github.com/golang/protobuf/proto"
	tpb "github.com/golang/protobuf/proto/proto3_proto"
)

var (
	bytesBlackhole []byte
	msgBlackhole   = new(tpb.Message)
)

// BenchmarkVarint32ArraySmall shows the performance on an array of small int32 fields (1 and
// 2 bytes long).
func BenchmarkVarint32ArraySmall(b *testing.B) {
	for i := uint(1); i <= 10; i++ {
		dist := genInt32Dist([7]int{0, 3, 1}, 1<<i)
		raw, err := proto.Marshal(&tpb.Message{
			ShortKey: dist,
		})
		if err != nil {
			b.Error("wrong encode", err)
		}
		b.Run(fmt.Sprintf("Len%v", len(dist)), func(b *testing.B) {
			scratchBuf := proto.NewBuffer(nil)
			b.ResetTimer()
			for k := 0; k < b.N; k++ {
				scratchBuf.SetBuf(raw)
				msgBlackhole.Reset()
				if err := scratchBuf.Unmarshal(msgBlackhole); err != nil {
					b.Error("wrong decode", err)
				}
			}
		})
	}
}

// BenchmarkVarint32ArrayLarge shows the performance on an array of large int32 fields (3 and
// 4 bytes long, with a small number of 1, 2, 5 and 10 byte long versions).
func BenchmarkVarint32ArrayLarge(b *testing.B) {
	for i := uint(1); i <= 10; i++ {
		dist := genInt32Dist([7]int{0, 1, 2, 4, 8, 1, 1}, 1<<i)
		raw, err := proto.Marshal(&tpb.Message{
			ShortKey: dist,
		})
		if err != nil {
			b.Error("wrong encode", err)
		}
		b.Run(fmt.Sprintf("Len%v", len(dist)), func(b *testing.B) {
			scratchBuf := proto.NewBuffer(nil)
			b.ResetTimer()
			for k := 0; k < b.N; k++ {
				scratchBuf.SetBuf(raw)
				msgBlackhole.Reset()
				if err := scratchBuf.Unmarshal(msgBlackhole); err != nil {
					b.Error("wrong decode", err)
				}
			}
		})
	}
}

// BenchmarkVarint64ArraySmall shows the performance on an array of small int64 fields (1 and
// 2 bytes long).
func BenchmarkVarint64ArraySmall(b *testing.B) {
	for i := uint(1); i <= 10; i++ {
		dist := genUint64Dist([11]int{0, 3, 1}, 1<<i)
		raw, err := proto.Marshal(&tpb.Message{
			Key: dist,
		})
		if err != nil {
			b.Error("wrong encode", err)
		}
		b.Run(fmt.Sprintf("Len%v", len(dist)), func(b *testing.B) {
			scratchBuf := proto.NewBuffer(nil)
			b.ResetTimer()
			for k := 0; k < b.N; k++ {
				scratchBuf.SetBuf(raw)
				msgBlackhole.Reset()
				if err := scratchBuf.Unmarshal(msgBlackhole); err != nil {
					b.Error("wrong decode", err)
				}
			}
		})
	}
}

// BenchmarkVarint64ArrayLarge shows the performance on an array of large int64 fields (6, 7,
// and 8 bytes long with a small number of the other sizes).
func BenchmarkVarint64ArrayLarge(b *testing.B) {
	for i := uint(1); i <= 10; i++ {
		dist := genUint64Dist([11]int{0, 1, 1, 2, 4, 8, 16, 32, 16, 1, 1}, 1<<i)
		raw, err := proto.Marshal(&tpb.Message{
			Key: dist,
		})
		if err != nil {
			b.Error("wrong encode", err)
		}
		b.Run(fmt.Sprintf("Len%v", len(dist)), func(b *testing.B) {
			scratchBuf := proto.NewBuffer(nil)
			b.ResetTimer()
			for k := 0; k < b.N; k++ {
				scratchBuf.SetBuf(raw)
				msgBlackhole.Reset()
				if err := scratchBuf.Unmarshal(msgBlackhole); err != nil {
					b.Error("wrong decode", err)
				}
			}
		})
	}
}

// BenchmarkVarint64ArrayMixed shows the performance of lots of small messages, each
// containing a small number of large (3, 4, and 5 byte) repeated int64s.
func BenchmarkVarint64ArrayMixed(b *testing.B) {
	for i := uint(1); i <= 1<<5; i <<= 1 {
		dist := genUint64Dist([11]int{0, 0, 0, 4, 6, 4, 0, 0, 0, 0, 0}, int(i))
		// number of sub fields
		for k := uint(1); k <= 1<<10; k <<= 2 {
			msg := &tpb.Message{}
			for m := uint(0); m < k; m++ {
				msg.Children = append(msg.Children, &tpb.Message{
					Key: dist,
				})
			}
			raw, err := proto.Marshal(msg)
			if err != nil {
				b.Error("wrong encode", err)
			}
			b.Run(fmt.Sprintf("Fields%vLen%v", k, i), func(b *testing.B) {
				scratchBuf := proto.NewBuffer(nil)
				b.ResetTimer()
				for k := 0; k < b.N; k++ {
					scratchBuf.SetBuf(raw)
					msgBlackhole.Reset()
					if err := scratchBuf.Unmarshal(msgBlackhole); err != nil {
						b.Error("wrong decode", err)
					}
				}
			})
		}
	}
}

// genInt32Dist generates a slice of ints that will match the size distribution of dist.
// A size of 6 corresponds to a max length varint32, which is 10 bytes.  The distribution
// is 1-indexed. (i.e. the value at index 1 is how many 1 byte ints to create).
func genInt32Dist(dist [7]int, count int) (dest []int32) {
	for i := 0; i < count; i++ {
		for k := 0; k < len(dist); k++ {
			var num int32
			switch k {
			case 1:
				num = 1<<7 - 1
			case 2:
				num = 1<<14 - 1
			case 3:
				num = 1<<21 - 1
			case 4:
				num = 1<<28 - 1
			case 5:
				num = 1<<29 - 1
			case 6:
				num = -1
			}
			for m := 0; m < dist[k]; m++ {
				dest = append(dest, num)
			}
		}
	}
	return
}

// genUint64Dist generates a slice of ints that will match the size distribution of dist.
// The distribution is 1-indexed. (i.e. the value at index 1 is how many 1 byte ints to create).
func genUint64Dist(dist [11]int, count int) (dest []uint64) {
	for i := 0; i < count; i++ {
		for k := 0; k < len(dist); k++ {
			var num uint64
			switch k {
			case 1:
				num = 1<<7 - 1
			case 2:
				num = 1<<14 - 1
			case 3:
				num = 1<<21 - 1
			case 4:
				num = 1<<28 - 1
			case 5:
				num = 1<<35 - 1
			case 6:
				num = 1<<42 - 1
			case 7:
				num = 1<<49 - 1
			case 8:
				num = 1<<56 - 1
			case 9:
				num = 1<<63 - 1
			case 10:
				num = 1<<64 - 1
			}
			for m := 0; m < dist[k]; m++ {
				dest = append(dest, num)
			}
		}
	}
	return
}

// BenchmarkDecodeEmpty measures the overhead of doing the minimal possible decode.
func BenchmarkDecodeEmpty(b *testing.B) {
	raw, err := proto.Marshal(&tpb.Message{})
	if err != nil {
		b.Error("wrong encode", err)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if err := proto.Unmarshal(raw, msgBlackhole); err != nil {
			b.Error("wrong decode", err)
		}
	}
}
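Note: the benchmarks above share one pattern: marshal once, then repeatedly reset a reused proto.Buffer with SetBuf so allocation noise stays out of the decode measurement. A stripped-down sketch of that loop follows; it assumes some generated proto.Message as the destination (proto3_proto is test-only code).

package proto_bench_sketch

import (
	"testing"

	"github.com/golang/protobuf/proto"
)

// benchmarkDecode is a minimal sketch of the reuse pattern above;
// any generated proto.Message works as the destination.
func benchmarkDecode(b *testing.B, raw []byte, msg proto.Message) {
	buf := proto.NewBuffer(nil)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		buf.SetBuf(raw) // point the reused buffer at the encoded bytes
		msg.Reset()     // clear the destination between iterations
		if err := buf.Unmarshal(msg); err != nil {
			b.Fatal("decode:", err)
		}
	}
}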
1362 vendor/src/github.com/golang/protobuf/proto/encode.go vendored Normal file
File diff suppressed because it is too large
83 vendor/src/github.com/golang/protobuf/proto/encode_test.go vendored Normal file
@@ -0,0 +1,83 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package proto_test

import (
	"strconv"
	"testing"

	"github.com/golang/protobuf/proto"
	tpb "github.com/golang/protobuf/proto/proto3_proto"
	"github.com/golang/protobuf/ptypes"
)

var (
	blackhole []byte
)

// BenchmarkAny creates increasingly large arbitrary Any messages.  The type is always the
// same.
func BenchmarkAny(b *testing.B) {
	data := make([]byte, 1<<20)
	quantum := 1 << 10
	for i := uint(0); i <= 10; i++ {
		b.Run(strconv.Itoa(quantum<<i), func(b *testing.B) {
			for k := 0; k < b.N; k++ {
				inner := &tpb.Message{
					Data: data[:quantum<<i],
				}
				outer, err := ptypes.MarshalAny(inner)
				if err != nil {
					b.Error("wrong encode", err)
				}
				raw, err := proto.Marshal(&tpb.Message{
					Anything: outer,
				})
				if err != nil {
					b.Error("wrong encode", err)
				}
				blackhole = raw
			}
		})
	}
}

// BenchmarkEmpy measures the overhead of doing the minimal possible encode.
func BenchmarkEmpy(b *testing.B) {
	for i := 0; i < b.N; i++ {
		raw, err := proto.Marshal(&tpb.Message{})
		if err != nil {
			b.Error("wrong encode", err)
		}
		blackhole = raw
	}
}
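Note: BenchmarkAny above wraps a message in a google.protobuf.Any via ptypes.MarshalAny before re-marshaling it. A hedged sketch of that round trip outside the benchmark harness is below; wrappers.StringValue is used only as a convenient generated message type and is not necessarily part of this vendor set.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes"
	"github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
	// Pack a concrete message into an Any, as BenchmarkAny does with tpb.Message.
	inner := &wrappers.StringValue{Value: "hello"}
	anyMsg, err := ptypes.MarshalAny(inner)
	if err != nil {
		panic(err)
	}

	// The Any itself is an ordinary message and can be marshaled like any other.
	raw, err := proto.Marshal(anyMsg)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(raw), anyMsg.TypeUrl)

	// Unpack it again; UnmarshalAny checks that the type URL matches the target.
	out := &wrappers.StringValue{}
	if err := ptypes.UnmarshalAny(anyMsg, out); err != nil {
		panic(err)
	}
	fmt.Println(out.Value) // hello
}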
300 vendor/src/github.com/golang/protobuf/proto/equal.go vendored Normal file
@@ -0,0 +1,300 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2011 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Protocol buffer comparison.

package proto

import (
	"bytes"
	"log"
	"reflect"
	"strings"
)

/*
Equal returns true iff protocol buffers a and b are equal.
The arguments must both be pointers to protocol buffer structs.

Equality is defined in this way:
  - Two messages are equal iff they are the same type,
    corresponding fields are equal, unknown field sets
    are equal, and extensions sets are equal.
  - Two set scalar fields are equal iff their values are equal.
    If the fields are of a floating-point type, remember that
    NaN != x for all x, including NaN. If the message is defined
    in a proto3 .proto file, fields are not "set"; specifically,
    zero length proto3 "bytes" fields are equal (nil == {}).
  - Two repeated fields are equal iff their lengths are the same,
    and their corresponding elements are equal. Note a "bytes" field,
    although represented by []byte, is not a repeated field and the
    rule for the scalar fields described above applies.
  - Two unset fields are equal.
  - Two unknown field sets are equal if their current
    encoded state is equal.
  - Two extension sets are equal iff they have corresponding
    elements that are pairwise equal.
  - Two map fields are equal iff their lengths are the same,
    and they contain the same set of elements. Zero-length map
    fields are equal.
  - Every other combination of things are not equal.

The return value is undefined if a and b are not protocol buffers.
*/
func Equal(a, b Message) bool {
	if a == nil || b == nil {
		return a == b
	}
	v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
	if v1.Type() != v2.Type() {
		return false
	}
	if v1.Kind() == reflect.Ptr {
		if v1.IsNil() {
			return v2.IsNil()
		}
		if v2.IsNil() {
			return false
		}
		v1, v2 = v1.Elem(), v2.Elem()
	}
	if v1.Kind() != reflect.Struct {
		return false
	}
	return equalStruct(v1, v2)
}

// v1 and v2 are known to have the same type.
func equalStruct(v1, v2 reflect.Value) bool {
	sprop := GetProperties(v1.Type())
	for i := 0; i < v1.NumField(); i++ {
		f := v1.Type().Field(i)
		if strings.HasPrefix(f.Name, "XXX_") {
			continue
		}
		f1, f2 := v1.Field(i), v2.Field(i)
		if f.Type.Kind() == reflect.Ptr {
			if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
				// both unset
				continue
			} else if n1 != n2 {
				// set/unset mismatch
				return false
			}
			b1, ok := f1.Interface().(raw)
			if ok {
				b2 := f2.Interface().(raw)
				// RawMessage
				if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
					return false
				}
				continue
			}
			f1, f2 = f1.Elem(), f2.Elem()
		}
		if !equalAny(f1, f2, sprop.Prop[i]) {
			return false
		}
	}

	if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
		em2 := v2.FieldByName("XXX_InternalExtensions")
		if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
			return false
		}
	}

	if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
		em2 := v2.FieldByName("XXX_extensions")
		if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
			return false
		}
	}

	uf := v1.FieldByName("XXX_unrecognized")
	if !uf.IsValid() {
		return true
	}

	u1 := uf.Bytes()
	u2 := v2.FieldByName("XXX_unrecognized").Bytes()
	if !bytes.Equal(u1, u2) {
		return false
	}

	return true
}

// v1 and v2 are known to have the same type.
// prop may be nil.
func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
	if v1.Type() == protoMessageType {
		m1, _ := v1.Interface().(Message)
		m2, _ := v2.Interface().(Message)
		return Equal(m1, m2)
	}
	switch v1.Kind() {
	case reflect.Bool:
		return v1.Bool() == v2.Bool()
	case reflect.Float32, reflect.Float64:
		return v1.Float() == v2.Float()
	case reflect.Int32, reflect.Int64:
		return v1.Int() == v2.Int()
	case reflect.Interface:
		// Probably a oneof field; compare the inner values.
		n1, n2 := v1.IsNil(), v2.IsNil()
		if n1 || n2 {
			return n1 == n2
		}
		e1, e2 := v1.Elem(), v2.Elem()
		if e1.Type() != e2.Type() {
			return false
		}
		return equalAny(e1, e2, nil)
	case reflect.Map:
		if v1.Len() != v2.Len() {
			return false
		}
		for _, key := range v1.MapKeys() {
			val2 := v2.MapIndex(key)
			if !val2.IsValid() {
				// This key was not found in the second map.
				return false
			}
			if !equalAny(v1.MapIndex(key), val2, nil) {
				return false
			}
		}
		return true
	case reflect.Ptr:
		// Maps may have nil values in them, so check for nil.
		if v1.IsNil() && v2.IsNil() {
			return true
		}
		if v1.IsNil() != v2.IsNil() {
			return false
		}
		return equalAny(v1.Elem(), v2.Elem(), prop)
	case reflect.Slice:
		if v1.Type().Elem().Kind() == reflect.Uint8 {
			// short circuit: []byte

			// Edge case: if this is in a proto3 message, a zero length
			// bytes field is considered the zero value.
			if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
				return true
			}
			if v1.IsNil() != v2.IsNil() {
				return false
			}
			return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
		}

		if v1.Len() != v2.Len() {
			return false
		}
		for i := 0; i < v1.Len(); i++ {
			if !equalAny(v1.Index(i), v2.Index(i), prop) {
				return false
			}
		}
		return true
	case reflect.String:
		return v1.Interface().(string) == v2.Interface().(string)
	case reflect.Struct:
		return equalStruct(v1, v2)
	case reflect.Uint32, reflect.Uint64:
		return v1.Uint() == v2.Uint()
	}

	// unknown type, so not a protocol buffer
	log.Printf("proto: don't know how to compare %v", v1)
	return false
}

// base is the struct type that the extensions are based on.
// x1 and x2 are InternalExtensions.
func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
	em1, _ := x1.extensionsRead()
	em2, _ := x2.extensionsRead()
	return equalExtMap(base, em1, em2)
}

func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
	if len(em1) != len(em2) {
		return false
	}

	for extNum, e1 := range em1 {
		e2, ok := em2[extNum]
		if !ok {
			return false
		}

		m1, m2 := e1.value, e2.value

		if m1 != nil && m2 != nil {
			// Both are unencoded.
			if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
				return false
			}
			continue
		}

		// At least one is encoded. To do a semantically correct comparison
		// we need to unmarshal them first.
		var desc *ExtensionDesc
		if m := extensionMaps[base]; m != nil {
			desc = m[extNum]
		}
		if desc == nil {
			log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
			continue
		}
		var err error
		if m1 == nil {
			m1, err = decodeExtension(e1.enc, desc)
		}
		if m2 == nil && err == nil {
			m2, err = decodeExtension(e2.enc, desc)
		}
		if err != nil {
			// The encoded form is invalid.
			log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
			return false
		}
		if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
			return false
		}
	}

	return true
}
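Note: the Equal doc comment above spells out the comparison rules (same type required, nil and zero-length repeated fields equal, oneof payloads compared by inner value). A hedged sketch exercising those rules is below; structpb is used only as a convenient generated proto3 type and is not necessarily part of this vendor set.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	structpb "github.com/golang/protobuf/ptypes/struct"
)

func main() {
	// Same type, same field values: equal.
	a := &structpb.Value{Kind: &structpb.Value_StringValue{StringValue: "x"}}
	b := &structpb.Value{Kind: &structpb.Value_StringValue{StringValue: "x"}}
	fmt.Println(proto.Equal(a, b)) // true

	// A nil repeated field and a zero-length one compare equal,
	// matching the "repeated fields ... lengths are the same" rule above.
	fmt.Println(proto.Equal(
		&structpb.ListValue{},
		&structpb.ListValue{Values: []*structpb.Value{}},
	)) // true

	// Different oneof payloads are not equal.
	c := &structpb.Value{Kind: &structpb.Value_NumberValue{NumberValue: 1}}
	fmt.Println(proto.Equal(a, c)) // false
}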
224 vendor/src/github.com/golang/protobuf/proto/equal_test.go vendored Normal file
@@ -0,0 +1,224 @@
|
||||||
|
// Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
//
|
||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// https://github.com/golang/protobuf
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
package proto_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
. "github.com/golang/protobuf/proto"
|
||||||
|
proto3pb "github.com/golang/protobuf/proto/proto3_proto"
|
||||||
|
pb "github.com/golang/protobuf/proto/testdata"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Four identical base messages.
|
||||||
|
// The init function adds extensions to some of them.
|
||||||
|
var messageWithoutExtension = &pb.MyMessage{Count: Int32(7)}
|
||||||
|
var messageWithExtension1a = &pb.MyMessage{Count: Int32(7)}
|
||||||
|
var messageWithExtension1b = &pb.MyMessage{Count: Int32(7)}
|
||||||
|
var messageWithExtension2 = &pb.MyMessage{Count: Int32(7)}
|
||||||
|
|
||||||
|
// Two messages with non-message extensions.
|
||||||
|
var messageWithInt32Extension1 = &pb.MyMessage{Count: Int32(8)}
|
||||||
|
var messageWithInt32Extension2 = &pb.MyMessage{Count: Int32(8)}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
ext1 := &pb.Ext{Data: String("Kirk")}
|
||||||
|
ext2 := &pb.Ext{Data: String("Picard")}
|
||||||
|
|
||||||
|
// messageWithExtension1a has ext1, but never marshals it.
|
||||||
|
if err := SetExtension(messageWithExtension1a, pb.E_Ext_More, ext1); err != nil {
|
||||||
|
panic("SetExtension on 1a failed: " + err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
// messageWithExtension1b is the unmarshaled form of messageWithExtension1a.
|
||||||
|
if err := SetExtension(messageWithExtension1b, pb.E_Ext_More, ext1); err != nil {
|
||||||
|
panic("SetExtension on 1b failed: " + err.Error())
|
||||||
|
}
|
||||||
|
buf, err := Marshal(messageWithExtension1b)
|
||||||
|
if err != nil {
|
||||||
|
panic("Marshal of 1b failed: " + err.Error())
|
||||||
|
}
|
||||||
|
messageWithExtension1b.Reset()
|
||||||
|
if err := Unmarshal(buf, messageWithExtension1b); err != nil {
|
||||||
|
panic("Unmarshal of 1b failed: " + err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
// messageWithExtension2 has ext2.
|
||||||
|
if err := SetExtension(messageWithExtension2, pb.E_Ext_More, ext2); err != nil {
|
||||||
|
panic("SetExtension on 2 failed: " + err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(23)); err != nil {
|
||||||
|
panic("SetExtension on Int32-1 failed: " + err.Error())
|
||||||
|
}
|
||||||
|
if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(24)); err != nil {
|
||||||
|
panic("SetExtension on Int32-2 failed: " + err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var EqualTests = []struct {
|
||||||
|
desc string
|
||||||
|
a, b Message
|
||||||
|
exp bool
|
||||||
|
}{
|
||||||
|
{"different types", &pb.GoEnum{}, &pb.GoTestField{}, false},
|
||||||
|
{"equal empty", &pb.GoEnum{}, &pb.GoEnum{}, true},
|
||||||
|
{"nil vs nil", nil, nil, true},
|
||||||
|
{"typed nil vs typed nil", (*pb.GoEnum)(nil), (*pb.GoEnum)(nil), true},
|
||||||
|
{"typed nil vs empty", (*pb.GoEnum)(nil), &pb.GoEnum{}, false},
|
||||||
|
{"different typed nil", (*pb.GoEnum)(nil), (*pb.GoTestField)(nil), false},
|
||||||
|
|
||||||
|
{"one set field, one unset field", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{}, false},
|
||||||
|
{"one set field zero, one unset field", &pb.GoTest{Param: Int32(0)}, &pb.GoTest{}, false},
|
||||||
|
{"different set fields", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("bar")}, false},
|
||||||
|
{"equal set", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("foo")}, true},
|
||||||
|
|
||||||
|
{"repeated, one set", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{}, false},
|
||||||
|
{"repeated, different length", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{F_Int32Repeated: []int32{2}}, false},
|
||||||
|
{"repeated, different value", &pb.GoTest{F_Int32Repeated: []int32{2}}, &pb.GoTest{F_Int32Repeated: []int32{3}}, false},
|
||||||
|
{"repeated, equal", &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, true},
|
||||||
|
{"repeated, nil equal nil", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: nil}, true},
|
||||||
|
{"repeated, nil equal empty", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: []int32{}}, true},
|
||||||
|
{"repeated, empty equal nil", &pb.GoTest{F_Int32Repeated: []int32{}}, &pb.GoTest{F_Int32Repeated: nil}, true},
|
||||||
|
|
||||||
|
{
|
||||||
|
"nested, different",
|
||||||
|
&pb.GoTest{RequiredField: &pb.GoTestField{Label: String("foo")}},
|
||||||
|
&pb.GoTest{RequiredField: &pb.GoTestField{Label: String("bar")}},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"nested, equal",
|
||||||
|
&pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}},
|
||||||
|
&pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}},
|
||||||
|
true,
|
||||||
|
},
|
||||||
|
|
||||||
|
{"bytes", &pb.OtherMessage{Value: []byte("foo")}, &pb.OtherMessage{Value: []byte("foo")}, true},
|
||||||
|
{"bytes, empty", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: []byte{}}, true},
|
||||||
|
{"bytes, empty vs nil", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: nil}, false},
|
||||||
|
{
|
||||||
|
"repeated bytes",
|
||||||
|
&pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}},
|
||||||
|
&pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}},
|
||||||
|
true,
|
||||||
|
},
|
||||||
|
// In proto3, []byte{} and []byte(nil) are equal.
|
||||||
|
{"proto3 bytes, empty vs nil", &proto3pb.Message{Data: []byte{}}, &proto3pb.Message{Data: nil}, true},
|
||||||
|
|
||||||
|
{"extension vs. no extension", messageWithoutExtension, messageWithExtension1a, false},
|
||||||
|
{"extension vs. same extension", messageWithExtension1a, messageWithExtension1b, true},
|
||||||
|
{"extension vs. different extension", messageWithExtension1a, messageWithExtension2, false},
|
||||||
|
|
||||||
|
{"int32 extension vs. itself", messageWithInt32Extension1, messageWithInt32Extension1, true},
|
||||||
|
{"int32 extension vs. a different int32", messageWithInt32Extension1, messageWithInt32Extension2, false},
|
||||||
|
|
||||||
|
{
|
||||||
|
"message with group",
|
||||||
|
&pb.MyMessage{
|
||||||
|
Count: Int32(1),
|
||||||
|
Somegroup: &pb.MyMessage_SomeGroup{
|
||||||
|
GroupField: Int32(5),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
&pb.MyMessage{
|
||||||
|
Count: Int32(1),
|
||||||
|
Somegroup: &pb.MyMessage_SomeGroup{
|
||||||
|
GroupField: Int32(5),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
true,
|
||||||
|
},
|
||||||
|
|
||||||
|
{
|
||||||
|
"map same",
|
||||||
|
&pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
|
||||||
|
&pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
|
||||||
|
true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"map different entry",
|
||||||
|
&pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
|
||||||
|
&pb.MessageWithMap{NameMapping: map[int32]string{2: "Rob"}},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"map different key only",
|
||||||
|
&pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
|
||||||
|
&pb.MessageWithMap{NameMapping: map[int32]string{2: "Ken"}},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"map different value only",
|
||||||
|
&pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
|
||||||
|
&pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob"}},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"zero-length maps same",
|
||||||
|
&pb.MessageWithMap{NameMapping: map[int32]string{}},
|
||||||
|
&pb.MessageWithMap{NameMapping: nil},
|
||||||
|
true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"orders in map don't matter",
|
||||||
|
&pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken", 2: "Rob"}},
|
||||||
|
&pb.MessageWithMap{NameMapping: map[int32]string{2: "Rob", 1: "Ken"}},
|
||||||
|
true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"oneof same",
|
||||||
|
&pb.Communique{Union: &pb.Communique_Number{41}},
|
||||||
|
&pb.Communique{Union: &pb.Communique_Number{41}},
|
||||||
|
true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"oneof one nil",
|
||||||
|
&pb.Communique{Union: &pb.Communique_Number{41}},
|
||||||
|
&pb.Communique{},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"oneof different",
|
||||||
|
&pb.Communique{Union: &pb.Communique_Number{41}},
|
||||||
|
&pb.Communique{Union: &pb.Communique_Name{"Bobby Tables"}},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestEqual(t *testing.T) {
|
||||||
|
for _, tc := range EqualTests {
|
||||||
|
if res := Equal(tc.a, tc.b); res != tc.exp {
|
||||||
|
t.Errorf("%v: Equal(%v, %v) = %v, want %v", tc.desc, tc.a, tc.b, res, tc.exp)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
587 vendor/src/github.com/golang/protobuf/proto/extensions.go vendored Normal file
@@ -0,0 +1,587 @@
|
||||||
|
// Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
//
|
||||||
|
// Copyright 2010 The Go Authors. All rights reserved.
|
||||||
|
// https://github.com/golang/protobuf
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
package proto
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Types and routines for supporting protocol buffer extensions.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"strconv"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
|
||||||
|
var ErrMissingExtension = errors.New("proto: missing extension")
|
||||||
|
|
||||||
|
// ExtensionRange represents a range of message extensions for a protocol buffer.
|
||||||
|
// Used in code generated by the protocol compiler.
|
||||||
|
type ExtensionRange struct {
|
||||||
|
Start, End int32 // both inclusive
|
||||||
|
}
|
||||||
|
|
||||||
|
// extendableProto is an interface implemented by any protocol buffer generated by the current
|
||||||
|
// proto compiler that may be extended.
|
||||||
|
type extendableProto interface {
|
||||||
|
Message
|
||||||
|
ExtensionRangeArray() []ExtensionRange
|
||||||
|
extensionsWrite() map[int32]Extension
|
||||||
|
extensionsRead() (map[int32]Extension, sync.Locker)
|
||||||
|
}
|
||||||
|
|
||||||
|
// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
|
||||||
|
// version of the proto compiler that may be extended.
|
||||||
|
type extendableProtoV1 interface {
|
||||||
|
Message
|
||||||
|
ExtensionRangeArray() []ExtensionRange
|
||||||
|
ExtensionMap() map[int32]Extension
|
||||||
|
}
|
||||||
|
|
||||||
|
// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
|
||||||
|
type extensionAdapter struct {
|
||||||
|
extendableProtoV1
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e extensionAdapter) extensionsWrite() map[int32]Extension {
|
||||||
|
return e.ExtensionMap()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
|
||||||
|
return e.ExtensionMap(), notLocker{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
|
||||||
|
type notLocker struct{}
|
||||||
|
|
||||||
|
func (n notLocker) Lock() {}
|
||||||
|
func (n notLocker) Unlock() {}
|
||||||
|
|
||||||
|
// extendable returns the extendableProto interface for the given generated proto message.
|
||||||
|
// If the proto message has the old extension format, it returns a wrapper that implements
|
||||||
|
// the extendableProto interface.
|
||||||
|
func extendable(p interface{}) (extendableProto, bool) {
|
||||||
|
if ep, ok := p.(extendableProto); ok {
|
||||||
|
return ep, ok
|
||||||
|
}
|
||||||
|
if ep, ok := p.(extendableProtoV1); ok {
|
||||||
|
return extensionAdapter{ep}, ok
|
||||||
|
}
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// XXX_InternalExtensions is an internal representation of proto extensions.
|
||||||
|
//
|
||||||
|
// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
|
||||||
|
// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
|
||||||
|
//
|
||||||
|
// The methods of XXX_InternalExtensions are not concurrency safe in general,
|
||||||
|
// but calls to logically read-only methods such as has and get may be executed concurrently.
|
||||||
|
type XXX_InternalExtensions struct {
|
||||||
|
// The struct must be indirect so that if a user inadvertently copies a
|
||||||
|
// generated message and its embedded XXX_InternalExtensions, they
|
||||||
|
// avoid the mayhem of a copied mutex.
|
||||||
|
//
|
||||||
|
// The mutex serializes all logically read-only operations to p.extensionMap.
|
||||||
|
// It is up to the client to ensure that write operations to p.extensionMap are
|
||||||
|
// mutually exclusive with other accesses.
|
||||||
|
p *struct {
|
||||||
|
mu sync.Mutex
|
||||||
|
extensionMap map[int32]Extension
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// extensionsWrite returns the extension map, creating it on first use.
|
||||||
|
func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
|
||||||
|
if e.p == nil {
|
||||||
|
e.p = new(struct {
|
||||||
|
mu sync.Mutex
|
||||||
|
extensionMap map[int32]Extension
|
||||||
|
})
|
||||||
|
e.p.extensionMap = make(map[int32]Extension)
|
||||||
|
}
|
||||||
|
return e.p.extensionMap
|
||||||
|
}
|
||||||
|
|
||||||
|
// extensionsRead returns the extensions map for read-only use. It may be nil.
|
||||||
|
// The caller must hold the returned mutex's lock when accessing Elements within the map.
|
||||||
|
func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
|
||||||
|
if e.p == nil {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
return e.p.extensionMap, &e.p.mu
|
||||||
|
}
|
||||||
|
|
||||||
|
var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
|
||||||
|
var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem()
|
||||||
|
|
||||||
|
// ExtensionDesc represents an extension specification.
|
||||||
|
// Used in generated code from the protocol compiler.
|
||||||
|
type ExtensionDesc struct {
|
||||||
|
ExtendedType Message // nil pointer to the type that is being extended
|
||||||
|
ExtensionType interface{} // nil pointer to the extension type
|
||||||
|
Field int32 // field number
|
||||||
|
Name string // fully-qualified name of extension, for text formatting
|
||||||
|
Tag string // protobuf tag style
|
||||||
|
Filename string // name of the file in which the extension is defined
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ed *ExtensionDesc) repeated() bool {
|
||||||
|
t := reflect.TypeOf(ed.ExtensionType)
|
||||||
|
return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extension represents an extension in a message.
|
||||||
|
type Extension struct {
|
||||||
|
// When an extension is stored in a message using SetExtension
|
||||||
|
// only desc and value are set. When the message is marshaled
|
||||||
|
// enc will be set to the encoded form of the message.
|
||||||
|
//
|
||||||
|
// When a message is unmarshaled and contains extensions, each
|
||||||
|
// extension will have only enc set. When such an extension is
|
||||||
|
// accessed using GetExtension (or GetExtensions) desc and value
|
||||||
|
// will be set.
|
||||||
|
desc *ExtensionDesc
|
||||||
|
value interface{}
|
||||||
|
enc []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetRawExtension is for testing only.
|
||||||
|
func SetRawExtension(base Message, id int32, b []byte) {
|
||||||
|
epb, ok := extendable(base)
|
||||||
|
if !ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
extmap := epb.extensionsWrite()
|
||||||
|
extmap[id] = Extension{enc: b}
|
||||||
|
}
|
||||||
|
|
||||||
|
// isExtensionField returns true iff the given field number is in an extension range.
|
||||||
|
func isExtensionField(pb extendableProto, field int32) bool {
|
||||||
|
for _, er := range pb.ExtensionRangeArray() {
|
||||||
|
if er.Start <= field && field <= er.End {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// checkExtensionTypes checks that the given extension is valid for pb.
|
||||||
|
func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
|
||||||
|
var pbi interface{} = pb
|
||||||
|
// Check the extended type.
|
||||||
|
if ea, ok := pbi.(extensionAdapter); ok {
|
||||||
|
pbi = ea.extendableProtoV1
|
||||||
|
}
|
||||||
|
if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
|
||||||
|
return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
|
||||||
|
}
|
||||||
|
// Check the range.
|
||||||
|
if !isExtensionField(pb, extension.Field) {
|
||||||
|
return errors.New("proto: bad extension number; not in declared ranges")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// extPropKey is sufficient to uniquely identify an extension.
|
||||||
|
type extPropKey struct {
|
||||||
|
base reflect.Type
|
||||||
|
field int32
|
||||||
|
}
|
||||||
|
|
||||||
|
var extProp = struct {
|
||||||
|
sync.RWMutex
|
||||||
|
m map[extPropKey]*Properties
|
||||||
|
}{
|
||||||
|
m: make(map[extPropKey]*Properties),
|
||||||
|
}
|
||||||
|
|
||||||
|
func extensionProperties(ed *ExtensionDesc) *Properties {
|
||||||
|
key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
|
||||||
|
|
||||||
|
extProp.RLock()
|
||||||
|
if prop, ok := extProp.m[key]; ok {
|
||||||
|
extProp.RUnlock()
|
||||||
|
return prop
|
||||||
|
}
|
||||||
|
extProp.RUnlock()
|
||||||
|
|
||||||
|
extProp.Lock()
|
||||||
|
defer extProp.Unlock()
|
||||||
|
// Check again.
|
||||||
|
if prop, ok := extProp.m[key]; ok {
|
||||||
|
return prop
|
||||||
|
}
|
||||||
|
|
||||||
|
prop := new(Properties)
|
||||||
|
prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
|
||||||
|
extProp.m[key] = prop
|
||||||
|
return prop
|
||||||
|
}
|
||||||
|
|
||||||
|
// encode encodes any unmarshaled (unencoded) extensions in e.
|
||||||
|
func encodeExtensions(e *XXX_InternalExtensions) error {
|
||||||
|
m, mu := e.extensionsRead()
|
||||||
|
if m == nil {
|
||||||
|
return nil // fast path
|
||||||
|
}
|
||||||
|
mu.Lock()
|
||||||
|
defer mu.Unlock()
|
||||||
|
return encodeExtensionsMap(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// encode encodes any unmarshaled (unencoded) extensions in e.
|
||||||
|
func encodeExtensionsMap(m map[int32]Extension) error {
|
||||||
|
for k, e := range m {
|
||||||
|
if e.value == nil || e.desc == nil {
|
||||||
|
// Extension is only in its encoded form.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// We don't skip extensions that have an encoded form set,
|
||||||
|
// because the extension value may have been mutated after
|
||||||
|
// the last time this function was called.
|
||||||
|
|
||||||
|
et := reflect.TypeOf(e.desc.ExtensionType)
|
||||||
|
props := extensionProperties(e.desc)
|
||||||
|
|
||||||
|
p := NewBuffer(nil)
|
||||||
|
// If e.value has type T, the encoder expects a *struct{ X T }.
|
||||||
|
// Pass a *T with a zero field and hope it all works out.
|
||||||
|
x := reflect.New(et)
|
||||||
|
x.Elem().Set(reflect.ValueOf(e.value))
|
||||||
|
if err := props.enc(p, props, toStructPointer(x)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
e.enc = p.buf
|
||||||
|
m[k] = e
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func extensionsSize(e *XXX_InternalExtensions) (n int) {
|
||||||
|
m, mu := e.extensionsRead()
|
||||||
|
if m == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
mu.Lock()
|
||||||
|
defer mu.Unlock()
|
||||||
|
return extensionsMapSize(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
func extensionsMapSize(m map[int32]Extension) (n int) {
|
||||||
|
for _, e := range m {
|
||||||
|
if e.value == nil || e.desc == nil {
|
||||||
|
// Extension is only in its encoded form.
|
||||||
|
n += len(e.enc)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// We don't skip extensions that have an encoded form set,
|
||||||
|
// because the extension value may have been mutated after
|
||||||
|
// the last time this function was called.
|
||||||
|
|
||||||
|
et := reflect.TypeOf(e.desc.ExtensionType)
|
||||||
|
props := extensionProperties(e.desc)
|
||||||
|
|
||||||
|
// If e.value has type T, the encoder expects a *struct{ X T }.
|
||||||
|
// Pass a *T with a zero field and hope it all works out.
|
||||||
|
x := reflect.New(et)
|
||||||
|
x.Elem().Set(reflect.ValueOf(e.value))
|
||||||
|
n += props.size(props, toStructPointer(x))
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasExtension returns whether the given extension is present in pb.
|
||||||
|
func HasExtension(pb Message, extension *ExtensionDesc) bool {
|
||||||
|
// TODO: Check types, field numbers, etc.?
|
||||||
|
epb, ok := extendable(pb)
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
extmap, mu := epb.extensionsRead()
|
||||||
|
if extmap == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
mu.Lock()
|
||||||
|
_, ok = extmap[extension.Field]
|
||||||
|
mu.Unlock()
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearExtension removes the given extension from pb.
|
||||||
|
func ClearExtension(pb Message, extension *ExtensionDesc) {
|
||||||
|
epb, ok := extendable(pb)
|
||||||
|
if !ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// TODO: Check types, field numbers, etc.?
|
||||||
|
extmap := epb.extensionsWrite()
|
||||||
|
delete(extmap, extension.Field)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetExtension parses and returns the given extension of pb.
|
||||||
|
// If the extension is not present and has no default value it returns ErrMissingExtension.
|
||||||
|
func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
|
||||||
|
epb, ok := extendable(pb)
|
||||||
|
if !ok {
|
||||||
|
return nil, errors.New("proto: not an extendable proto")
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := checkExtensionTypes(epb, extension); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
emap, mu := epb.extensionsRead()
|
||||||
|
if emap == nil {
|
||||||
|
return defaultExtensionValue(extension)
|
||||||
|
}
|
||||||
|
mu.Lock()
|
||||||
|
defer mu.Unlock()
|
||||||
|
e, ok := emap[extension.Field]
|
||||||
|
if !ok {
|
||||||
|
// defaultExtensionValue returns the default value or
|
||||||
|
// ErrMissingExtension if there is no default.
|
||||||
|
return defaultExtensionValue(extension)
|
||||||
|
}
|
||||||
|
|
||||||
|
if e.value != nil {
|
||||||
|
// Already decoded. Check the descriptor, though.
|
||||||
|
if e.desc != extension {
|
||||||
|
// This shouldn't happen. If it does, it means that
|
||||||
|
// GetExtension was called twice with two different
|
||||||
|
// descriptors with the same field number.
|
||||||
|
return nil, errors.New("proto: descriptor conflict")
|
||||||
|
}
|
||||||
|
return e.value, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
v, err := decodeExtension(e.enc, extension)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remember the decoded version and drop the encoded version.
|
||||||
|
// That way it is safe to mutate what we return.
|
||||||
|
e.value = v
|
||||||
|
e.desc = extension
|
||||||
|
e.enc = nil
|
||||||
|
emap[extension.Field] = e
|
||||||
|
return e.value, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// defaultExtensionValue returns the default value for extension.
|
||||||
|
// If no default for an extension is defined ErrMissingExtension is returned.
|
||||||
|
func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
|
||||||
|
t := reflect.TypeOf(extension.ExtensionType)
|
||||||
|
props := extensionProperties(extension)
|
||||||
|
|
||||||
|
sf, _, err := fieldDefault(t, props)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if sf == nil || sf.value == nil {
|
||||||
|
// There is no default value.
|
||||||
|
return nil, ErrMissingExtension
|
||||||
|
}
|
||||||
|
|
||||||
|
if t.Kind() != reflect.Ptr {
|
||||||
|
// We do not need to return a Ptr, we can directly return sf.value.
|
||||||
|
return sf.value, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// We need to return an interface{} that is a pointer to sf.value.
|
||||||
|
value := reflect.New(t).Elem()
|
||||||
|
value.Set(reflect.New(value.Type().Elem()))
|
||||||
|
if sf.kind == reflect.Int32 {
|
||||||
|
// We may have an int32 or an enum, but the underlying data is int32.
|
||||||
|
// Since we can't set an int32 into a non int32 reflect.value directly
|
||||||
|
// set it as a int32.
|
||||||
|
value.Elem().SetInt(int64(sf.value.(int32)))
|
||||||
|
} else {
|
||||||
|
value.Elem().Set(reflect.ValueOf(sf.value))
|
||||||
|
}
|
||||||
|
return value.Interface(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// decodeExtension decodes an extension encoded in b.
|
||||||
|
func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
|
||||||
|
o := NewBuffer(b)
|
||||||
|
|
||||||
|
t := reflect.TypeOf(extension.ExtensionType)
|
||||||
|
|
||||||
|
props := extensionProperties(extension)
|
||||||
|
|
||||||
|
// t is a pointer to a struct, pointer to basic type or a slice.
|
||||||
|
// Allocate a "field" to store the pointer/slice itself; the
|
||||||
|
// pointer/slice will be stored here. We pass
|
||||||
|
// the address of this field to props.dec.
|
||||||
|
// This passes a zero field and a *t and lets props.dec
|
||||||
|
// interpret it as a *struct{ x t }.
|
||||||
|
value := reflect.New(t).Elem()
|
||||||
|
|
||||||
|
for {
|
||||||
|
// Discard wire type and field number varint. It isn't needed.
|
||||||
|
if _, err := o.DecodeVarint(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if o.index >= len(o.buf) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return value.Interface(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
|
||||||
|
// The returned slice has the same length as es; missing extensions will appear as nil elements.
|
||||||
|
func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
|
||||||
|
epb, ok := extendable(pb)
|
||||||
|
if !ok {
|
||||||
|
return nil, errors.New("proto: not an extendable proto")
|
||||||
|
}
|
||||||
|
extensions = make([]interface{}, len(es))
|
||||||
|
for i, e := range es {
|
||||||
|
extensions[i], err = GetExtension(epb, e)
|
||||||
|
if err == ErrMissingExtension {
|
||||||
|
err = nil
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}

// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
// just the Field field, which defines the extension's field number.
func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
	epb, ok := extendable(pb)
	if !ok {
		return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb)
	}
	registeredExtensions := RegisteredExtensions(pb)

	emap, mu := epb.extensionsRead()
	if emap == nil {
		return nil, nil
	}
	mu.Lock()
	defer mu.Unlock()
	extensions := make([]*ExtensionDesc, 0, len(emap))
	for extid, e := range emap {
		desc := e.desc
		if desc == nil {
			desc = registeredExtensions[extid]
			if desc == nil {
				desc = &ExtensionDesc{Field: extid}
			}
		}

		extensions = append(extensions, desc)
	}
	return extensions, nil
}

// SetExtension sets the specified extension of pb to the specified value.
func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
	epb, ok := extendable(pb)
	if !ok {
		return errors.New("proto: not an extendable proto")
	}
	if err := checkExtensionTypes(epb, extension); err != nil {
		return err
	}
	typ := reflect.TypeOf(extension.ExtensionType)
	if typ != reflect.TypeOf(value) {
		return errors.New("proto: bad extension value type")
	}
	// nil extension values need to be caught early, because the
	// encoder can't distinguish an ErrNil due to a nil extension
	// from an ErrNil due to a missing field. Extensions are
	// always optional, so the encoder would just swallow the error
	// and drop all the extensions from the encoded message.
	if reflect.ValueOf(value).IsNil() {
		return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
	}

	extmap := epb.extensionsWrite()
	extmap[extension.Field] = Extension{desc: extension, value: value}
	return nil
}

// ClearAllExtensions clears all extensions from pb.
func ClearAllExtensions(pb Message) {
	epb, ok := extendable(pb)
	if !ok {
		return
	}
	m := epb.extensionsWrite()
	for k := range m {
		delete(m, k)
	}
}

// A global registry of extensions.
// The generated code will register the generated descriptors by calling RegisterExtension.

var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)

// RegisterExtension is called from the generated code.
func RegisterExtension(desc *ExtensionDesc) {
	st := reflect.TypeOf(desc.ExtendedType).Elem()
	m := extensionMaps[st]
	if m == nil {
		m = make(map[int32]*ExtensionDesc)
		extensionMaps[st] = m
	}
	if _, ok := m[desc.Field]; ok {
		panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
	}
	m[desc.Field] = desc
}

// RegisteredExtensions returns a map of the registered extensions of a
// protocol buffer struct, indexed by the extension number.
// The argument pb should be a nil pointer to the struct type.
func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
	return extensionMaps[reflect.TypeOf(pb).Elem()]
}
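
A rough illustration of how this registry is used, written from a client package's point of view: generated .pb.go code builds an ExtensionDesc and registers it in init(), and callers then go through SetExtension/GetExtension. This is an editorial sketch, not part of the vendored file; E_MyExt, MyBase, and field number 50000 are hypothetical names.

// Illustrative sketch (hypothetical names): how generated code typically
// registers an extension descriptor and how callers use it afterwards.
var E_MyExt = &proto.ExtensionDesc{
	ExtendedType:  (*MyBase)(nil), // the extendable message type (hypothetical)
	ExtensionType: (*string)(nil), // Go type of the extension value
	Field:         50000,          // extension field number (hypothetical)
	Name:          "example.my_ext",
	Tag:           "bytes,50000,opt,name=my_ext",
}

func init() { proto.RegisterExtension(E_MyExt) }

func roundTrip(m *MyBase) (string, error) {
	if err := proto.SetExtension(m, E_MyExt, proto.String("hello")); err != nil {
		return "", err
	}
	v, err := proto.GetExtension(m, E_MyExt)
	if err != nil {
		return "", err
	}
	return *(v.(*string)), nil
}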
536
vendor/src/github.com/golang/protobuf/proto/extensions_test.go
vendored
Normal file
@ -0,0 +1,536 @@
|
||||||
|
// Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
//
|
||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// https://github.com/golang/protobuf
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
package proto_test

import (
	"bytes"
	"fmt"
	"reflect"
	"sort"
	"testing"

	"github.com/golang/protobuf/proto"
	pb "github.com/golang/protobuf/proto/testdata"
	"golang.org/x/sync/errgroup"
)

func TestGetExtensionsWithMissingExtensions(t *testing.T) {
	msg := &pb.MyMessage{}
	ext1 := &pb.Ext{}
	if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil {
		t.Fatalf("Could not set ext1: %s", err)
	}
	exts, err := proto.GetExtensions(msg, []*proto.ExtensionDesc{
		pb.E_Ext_More,
		pb.E_Ext_Text,
	})
	if err != nil {
		t.Fatalf("GetExtensions() failed: %s", err)
	}
	if exts[0] != ext1 {
		t.Errorf("ext1 not in returned extensions: %T %v", exts[0], exts[0])
	}
	if exts[1] != nil {
		t.Errorf("ext2 in returned extensions: %T %v", exts[1], exts[1])
	}
}
|
||||||
|
|
||||||
|
func TestExtensionDescsWithMissingExtensions(t *testing.T) {
|
||||||
|
msg := &pb.MyMessage{Count: proto.Int32(0)}
|
||||||
|
extdesc1 := pb.E_Ext_More
|
||||||
|
if descs, err := proto.ExtensionDescs(msg); len(descs) != 0 || err != nil {
|
||||||
|
t.Errorf("proto.ExtensionDescs: got %d descs, error %v; want 0, nil", len(descs), err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ext1 := &pb.Ext{}
|
||||||
|
if err := proto.SetExtension(msg, extdesc1, ext1); err != nil {
|
||||||
|
t.Fatalf("Could not set ext1: %s", err)
|
||||||
|
}
|
||||||
|
extdesc2 := &proto.ExtensionDesc{
|
||||||
|
ExtendedType: (*pb.MyMessage)(nil),
|
||||||
|
ExtensionType: (*bool)(nil),
|
||||||
|
Field: 123456789,
|
||||||
|
Name: "a.b",
|
||||||
|
Tag: "varint,123456789,opt",
|
||||||
|
}
|
||||||
|
ext2 := proto.Bool(false)
|
||||||
|
if err := proto.SetExtension(msg, extdesc2, ext2); err != nil {
|
||||||
|
t.Fatalf("Could not set ext2: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
b, err := proto.Marshal(msg)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Could not marshal msg: %v", err)
|
||||||
|
}
|
||||||
|
if err := proto.Unmarshal(b, msg); err != nil {
|
||||||
|
t.Fatalf("Could not unmarshal into msg: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
descs, err := proto.ExtensionDescs(msg)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("proto.ExtensionDescs: got error %v", err)
|
||||||
|
}
|
||||||
|
sortExtDescs(descs)
|
||||||
|
wantDescs := []*proto.ExtensionDesc{extdesc1, &proto.ExtensionDesc{Field: extdesc2.Field}}
|
||||||
|
if !reflect.DeepEqual(descs, wantDescs) {
|
||||||
|
t.Errorf("proto.ExtensionDescs(msg) sorted extension ids: got %+v, want %+v", descs, wantDescs)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type ExtensionDescSlice []*proto.ExtensionDesc
|
||||||
|
|
||||||
|
func (s ExtensionDescSlice) Len() int { return len(s) }
|
||||||
|
func (s ExtensionDescSlice) Less(i, j int) bool { return s[i].Field < s[j].Field }
|
||||||
|
func (s ExtensionDescSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||||
|
|
||||||
|
func sortExtDescs(s []*proto.ExtensionDesc) {
|
||||||
|
sort.Sort(ExtensionDescSlice(s))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetExtensionStability(t *testing.T) {
|
||||||
|
check := func(m *pb.MyMessage) bool {
|
||||||
|
ext1, err := proto.GetExtension(m, pb.E_Ext_More)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("GetExtension() failed: %s", err)
|
||||||
|
}
|
||||||
|
ext2, err := proto.GetExtension(m, pb.E_Ext_More)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("GetExtension() failed: %s", err)
|
||||||
|
}
|
||||||
|
return ext1 == ext2
|
||||||
|
}
|
||||||
|
msg := &pb.MyMessage{Count: proto.Int32(4)}
|
||||||
|
ext0 := &pb.Ext{}
|
||||||
|
if err := proto.SetExtension(msg, pb.E_Ext_More, ext0); err != nil {
|
||||||
|
t.Fatalf("Could not set ext1: %s", ext0)
|
||||||
|
}
|
||||||
|
if !check(msg) {
|
||||||
|
t.Errorf("GetExtension() not stable before marshaling")
|
||||||
|
}
|
||||||
|
bb, err := proto.Marshal(msg)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Marshal() failed: %s", err)
|
||||||
|
}
|
||||||
|
msg1 := &pb.MyMessage{}
|
||||||
|
err = proto.Unmarshal(bb, msg1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Unmarshal() failed: %s", err)
|
||||||
|
}
|
||||||
|
if !check(msg1) {
|
||||||
|
t.Errorf("GetExtension() not stable after unmarshaling")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetExtensionDefaults(t *testing.T) {
|
||||||
|
var setFloat64 float64 = 1
|
||||||
|
var setFloat32 float32 = 2
|
||||||
|
var setInt32 int32 = 3
|
||||||
|
var setInt64 int64 = 4
|
||||||
|
var setUint32 uint32 = 5
|
||||||
|
var setUint64 uint64 = 6
|
||||||
|
var setBool = true
|
||||||
|
var setBool2 = false
|
||||||
|
var setString = "Goodnight string"
|
||||||
|
var setBytes = []byte("Goodnight bytes")
|
||||||
|
var setEnum = pb.DefaultsMessage_TWO
|
||||||
|
|
||||||
|
type testcase struct {
|
||||||
|
ext *proto.ExtensionDesc // Extension we are testing.
|
||||||
|
want interface{} // Expected value of extension, or nil (meaning that GetExtension will fail).
|
||||||
|
def interface{} // Expected value of extension after ClearExtension().
|
||||||
|
}
|
||||||
|
tests := []testcase{
|
||||||
|
{pb.E_NoDefaultDouble, setFloat64, nil},
|
||||||
|
{pb.E_NoDefaultFloat, setFloat32, nil},
|
||||||
|
{pb.E_NoDefaultInt32, setInt32, nil},
|
||||||
|
{pb.E_NoDefaultInt64, setInt64, nil},
|
||||||
|
{pb.E_NoDefaultUint32, setUint32, nil},
|
||||||
|
{pb.E_NoDefaultUint64, setUint64, nil},
|
||||||
|
{pb.E_NoDefaultSint32, setInt32, nil},
|
||||||
|
{pb.E_NoDefaultSint64, setInt64, nil},
|
||||||
|
{pb.E_NoDefaultFixed32, setUint32, nil},
|
||||||
|
{pb.E_NoDefaultFixed64, setUint64, nil},
|
||||||
|
{pb.E_NoDefaultSfixed32, setInt32, nil},
|
||||||
|
{pb.E_NoDefaultSfixed64, setInt64, nil},
|
||||||
|
{pb.E_NoDefaultBool, setBool, nil},
|
||||||
|
{pb.E_NoDefaultBool, setBool2, nil},
|
||||||
|
{pb.E_NoDefaultString, setString, nil},
|
||||||
|
{pb.E_NoDefaultBytes, setBytes, nil},
|
||||||
|
{pb.E_NoDefaultEnum, setEnum, nil},
|
||||||
|
{pb.E_DefaultDouble, setFloat64, float64(3.1415)},
|
||||||
|
{pb.E_DefaultFloat, setFloat32, float32(3.14)},
|
||||||
|
{pb.E_DefaultInt32, setInt32, int32(42)},
|
||||||
|
{pb.E_DefaultInt64, setInt64, int64(43)},
|
||||||
|
{pb.E_DefaultUint32, setUint32, uint32(44)},
|
||||||
|
{pb.E_DefaultUint64, setUint64, uint64(45)},
|
||||||
|
{pb.E_DefaultSint32, setInt32, int32(46)},
|
||||||
|
{pb.E_DefaultSint64, setInt64, int64(47)},
|
||||||
|
{pb.E_DefaultFixed32, setUint32, uint32(48)},
|
||||||
|
{pb.E_DefaultFixed64, setUint64, uint64(49)},
|
||||||
|
{pb.E_DefaultSfixed32, setInt32, int32(50)},
|
||||||
|
{pb.E_DefaultSfixed64, setInt64, int64(51)},
|
||||||
|
{pb.E_DefaultBool, setBool, true},
|
||||||
|
{pb.E_DefaultBool, setBool2, true},
|
||||||
|
{pb.E_DefaultString, setString, "Hello, string"},
|
||||||
|
{pb.E_DefaultBytes, setBytes, []byte("Hello, bytes")},
|
||||||
|
{pb.E_DefaultEnum, setEnum, pb.DefaultsMessage_ONE},
|
||||||
|
}
|
||||||
|
|
||||||
|
checkVal := func(test testcase, msg *pb.DefaultsMessage, valWant interface{}) error {
|
||||||
|
val, err := proto.GetExtension(msg, test.ext)
|
||||||
|
if err != nil {
|
||||||
|
if valWant != nil {
|
||||||
|
return fmt.Errorf("GetExtension(): %s", err)
|
||||||
|
}
|
||||||
|
if want := proto.ErrMissingExtension; err != want {
|
||||||
|
return fmt.Errorf("Unexpected error: got %v, want %v", err, want)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// All proto2 extension values are either a pointer to a value or a slice of values.
|
||||||
|
ty := reflect.TypeOf(val)
|
||||||
|
tyWant := reflect.TypeOf(test.ext.ExtensionType)
|
||||||
|
if got, want := ty, tyWant; got != want {
|
||||||
|
return fmt.Errorf("unexpected reflect.TypeOf(): got %v want %v", got, want)
|
||||||
|
}
|
||||||
|
tye := ty.Elem()
|
||||||
|
tyeWant := tyWant.Elem()
|
||||||
|
if got, want := tye, tyeWant; got != want {
|
||||||
|
return fmt.Errorf("unexpected reflect.TypeOf().Elem(): got %v want %v", got, want)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check the name of the type of the value.
|
||||||
|
// If it is an enum it will be type int32 with the name of the enum.
|
||||||
|
if got, want := tye.Name(), tye.Name(); got != want {
|
||||||
|
return fmt.Errorf("unexpected reflect.TypeOf().Elem().Name(): got %v want %v", got, want)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check that value is what we expect.
|
||||||
|
// If we have a pointer in val, get the value it points to.
|
||||||
|
valExp := val
|
||||||
|
if ty.Kind() == reflect.Ptr {
|
||||||
|
valExp = reflect.ValueOf(val).Elem().Interface()
|
||||||
|
}
|
||||||
|
if got, want := valExp, valWant; !reflect.DeepEqual(got, want) {
|
||||||
|
return fmt.Errorf("unexpected reflect.DeepEqual(): got %v want %v", got, want)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
setTo := func(test testcase) interface{} {
|
||||||
|
setTo := reflect.ValueOf(test.want)
|
||||||
|
if typ := reflect.TypeOf(test.ext.ExtensionType); typ.Kind() == reflect.Ptr {
|
||||||
|
setTo = reflect.New(typ).Elem()
|
||||||
|
setTo.Set(reflect.New(setTo.Type().Elem()))
|
||||||
|
setTo.Elem().Set(reflect.ValueOf(test.want))
|
||||||
|
}
|
||||||
|
return setTo.Interface()
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
msg := &pb.DefaultsMessage{}
|
||||||
|
name := test.ext.Name
|
||||||
|
|
||||||
|
// Check the initial value.
|
||||||
|
if err := checkVal(test, msg, test.def); err != nil {
|
||||||
|
t.Errorf("%s: %v", name, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set the per-type value and check value.
|
||||||
|
name = fmt.Sprintf("%s (set to %T %v)", name, test.want, test.want)
|
||||||
|
if err := proto.SetExtension(msg, test.ext, setTo(test)); err != nil {
|
||||||
|
t.Errorf("%s: SetExtension(): %v", name, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if err := checkVal(test, msg, test.want); err != nil {
|
||||||
|
t.Errorf("%s: %v", name, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set and check the value.
|
||||||
|
name += " (cleared)"
|
||||||
|
proto.ClearExtension(msg, test.ext)
|
||||||
|
if err := checkVal(test, msg, test.def); err != nil {
|
||||||
|
t.Errorf("%s: %v", name, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestExtensionsRoundTrip(t *testing.T) {
|
||||||
|
msg := &pb.MyMessage{}
|
||||||
|
ext1 := &pb.Ext{
|
||||||
|
Data: proto.String("hi"),
|
||||||
|
}
|
||||||
|
ext2 := &pb.Ext{
|
||||||
|
Data: proto.String("there"),
|
||||||
|
}
|
||||||
|
exists := proto.HasExtension(msg, pb.E_Ext_More)
|
||||||
|
if exists {
|
||||||
|
t.Error("Extension More present unexpectedly")
|
||||||
|
}
|
||||||
|
if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
if err := proto.SetExtension(msg, pb.E_Ext_More, ext2); err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
e, err := proto.GetExtension(msg, pb.E_Ext_More)
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
x, ok := e.(*pb.Ext)
|
||||||
|
if !ok {
|
||||||
|
t.Errorf("e has type %T, expected testdata.Ext", e)
|
||||||
|
} else if *x.Data != "there" {
|
||||||
|
t.Errorf("SetExtension failed to overwrite, got %+v, not 'there'", x)
|
||||||
|
}
|
||||||
|
proto.ClearExtension(msg, pb.E_Ext_More)
|
||||||
|
if _, err = proto.GetExtension(msg, pb.E_Ext_More); err != proto.ErrMissingExtension {
|
||||||
|
t.Errorf("got %v, expected ErrMissingExtension", e)
|
||||||
|
}
|
||||||
|
if _, err := proto.GetExtension(msg, pb.E_X215); err == nil {
|
||||||
|
t.Error("expected bad extension error, got nil")
|
||||||
|
}
|
||||||
|
if err := proto.SetExtension(msg, pb.E_X215, 12); err == nil {
|
||||||
|
t.Error("expected extension err")
|
||||||
|
}
|
||||||
|
if err := proto.SetExtension(msg, pb.E_Ext_More, 12); err == nil {
|
||||||
|
t.Error("expected some sort of type mismatch error, got nil")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNilExtension(t *testing.T) {
|
||||||
|
msg := &pb.MyMessage{
|
||||||
|
Count: proto.Int32(1),
|
||||||
|
}
|
||||||
|
if err := proto.SetExtension(msg, pb.E_Ext_Text, proto.String("hello")); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if err := proto.SetExtension(msg, pb.E_Ext_More, (*pb.Ext)(nil)); err == nil {
|
||||||
|
t.Error("expected SetExtension to fail due to a nil extension")
|
||||||
|
} else if want := "proto: SetExtension called with nil value of type *testdata.Ext"; err.Error() != want {
|
||||||
|
t.Errorf("expected error %v, got %v", want, err)
|
||||||
|
}
|
||||||
|
// Note: if the behavior of Marshal is ever changed to ignore nil extensions, update
|
||||||
|
// this test to verify that E_Ext_Text is properly propagated through marshal->unmarshal.
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMarshalUnmarshalRepeatedExtension(t *testing.T) {
|
||||||
|
// Add a repeated extension to the result.
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
ext []*pb.ComplexExtension
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
"two fields",
|
||||||
|
[]*pb.ComplexExtension{
|
||||||
|
{First: proto.Int32(7)},
|
||||||
|
{Second: proto.Int32(11)},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"repeated field",
|
||||||
|
[]*pb.ComplexExtension{
|
||||||
|
{Third: []int32{1000}},
|
||||||
|
{Third: []int32{2000}},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"two fields and repeated field",
|
||||||
|
[]*pb.ComplexExtension{
|
||||||
|
{Third: []int32{1000}},
|
||||||
|
{First: proto.Int32(9)},
|
||||||
|
{Second: proto.Int32(21)},
|
||||||
|
{Third: []int32{2000}},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, test := range tests {
|
||||||
|
// Marshal message with a repeated extension.
|
||||||
|
msg1 := new(pb.OtherMessage)
|
||||||
|
err := proto.SetExtension(msg1, pb.E_RComplex, test.ext)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("[%s] Error setting extension: %v", test.name, err)
|
||||||
|
}
|
||||||
|
b, err := proto.Marshal(msg1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("[%s] Error marshaling message: %v", test.name, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unmarshal and read the merged proto.
|
||||||
|
msg2 := new(pb.OtherMessage)
|
||||||
|
err = proto.Unmarshal(b, msg2)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("[%s] Error unmarshaling message: %v", test.name, err)
|
||||||
|
}
|
||||||
|
e, err := proto.GetExtension(msg2, pb.E_RComplex)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("[%s] Error getting extension: %v", test.name, err)
|
||||||
|
}
|
||||||
|
ext := e.([]*pb.ComplexExtension)
|
||||||
|
if ext == nil {
|
||||||
|
t.Fatalf("[%s] Invalid extension", test.name)
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(ext, test.ext) {
|
||||||
|
t.Errorf("[%s] Wrong value for ComplexExtension: got: %v want: %v\n", test.name, ext, test.ext)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalRepeatingNonRepeatedExtension(t *testing.T) {
|
||||||
|
// We may see multiple instances of the same extension in the wire
|
||||||
|
// format. For example, the proto compiler may encode custom options in
|
||||||
|
// this way. Here, we verify that we merge the extensions together.
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
ext []*pb.ComplexExtension
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
"two fields",
|
||||||
|
[]*pb.ComplexExtension{
|
||||||
|
{First: proto.Int32(7)},
|
||||||
|
{Second: proto.Int32(11)},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"repeated field",
|
||||||
|
[]*pb.ComplexExtension{
|
||||||
|
{Third: []int32{1000}},
|
||||||
|
{Third: []int32{2000}},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"two fields and repeated field",
|
||||||
|
[]*pb.ComplexExtension{
|
||||||
|
{Third: []int32{1000}},
|
||||||
|
{First: proto.Int32(9)},
|
||||||
|
{Second: proto.Int32(21)},
|
||||||
|
{Third: []int32{2000}},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, test := range tests {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
var want pb.ComplexExtension
|
||||||
|
|
||||||
|
// Generate a serialized representation of a repeated extension
|
||||||
|
// by catenating bytes together.
|
||||||
|
for i, e := range test.ext {
|
||||||
|
// Merge to create the wanted proto.
|
||||||
|
proto.Merge(&want, e)
|
||||||
|
|
||||||
|
// serialize the message
|
||||||
|
msg := new(pb.OtherMessage)
|
||||||
|
err := proto.SetExtension(msg, pb.E_Complex, e)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("[%s] Error setting extension %d: %v", test.name, i, err)
|
||||||
|
}
|
||||||
|
b, err := proto.Marshal(msg)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("[%s] Error marshaling message %d: %v", test.name, i, err)
|
||||||
|
}
|
||||||
|
buf.Write(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unmarshal and read the merged proto.
|
||||||
|
msg2 := new(pb.OtherMessage)
|
||||||
|
err := proto.Unmarshal(buf.Bytes(), msg2)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("[%s] Error unmarshaling message: %v", test.name, err)
|
||||||
|
}
|
||||||
|
e, err := proto.GetExtension(msg2, pb.E_Complex)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("[%s] Error getting extension: %v", test.name, err)
|
||||||
|
}
|
||||||
|
ext := e.(*pb.ComplexExtension)
|
||||||
|
if ext == nil {
|
||||||
|
t.Fatalf("[%s] Invalid extension", test.name)
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(*ext, want) {
|
||||||
|
t.Errorf("[%s] Wrong value for ComplexExtension: got: %s want: %s\n", test.name, ext, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestClearAllExtensions(t *testing.T) {
|
||||||
|
// unregistered extension
|
||||||
|
desc := &proto.ExtensionDesc{
|
||||||
|
ExtendedType: (*pb.MyMessage)(nil),
|
||||||
|
ExtensionType: (*bool)(nil),
|
||||||
|
Field: 101010100,
|
||||||
|
Name: "emptyextension",
|
||||||
|
Tag: "varint,0,opt",
|
||||||
|
}
|
||||||
|
m := &pb.MyMessage{}
|
||||||
|
if proto.HasExtension(m, desc) {
|
||||||
|
t.Errorf("proto.HasExtension(%s): got true, want false", proto.MarshalTextString(m))
|
||||||
|
}
|
||||||
|
if err := proto.SetExtension(m, desc, proto.Bool(true)); err != nil {
|
||||||
|
t.Errorf("proto.SetExtension(m, desc, true): got error %q, want nil", err)
|
||||||
|
}
|
||||||
|
if !proto.HasExtension(m, desc) {
|
||||||
|
t.Errorf("proto.HasExtension(%s): got false, want true", proto.MarshalTextString(m))
|
||||||
|
}
|
||||||
|
proto.ClearAllExtensions(m)
|
||||||
|
if proto.HasExtension(m, desc) {
|
||||||
|
t.Errorf("proto.HasExtension(%s): got true, want false", proto.MarshalTextString(m))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMarshalRace(t *testing.T) {
|
||||||
|
// unregistered extension
|
||||||
|
desc := &proto.ExtensionDesc{
|
||||||
|
ExtendedType: (*pb.MyMessage)(nil),
|
||||||
|
ExtensionType: (*bool)(nil),
|
||||||
|
Field: 101010100,
|
||||||
|
Name: "emptyextension",
|
||||||
|
Tag: "varint,0,opt",
|
||||||
|
}
|
||||||
|
|
||||||
|
m := &pb.MyMessage{Count: proto.Int32(4)}
|
||||||
|
if err := proto.SetExtension(m, desc, proto.Bool(true)); err != nil {
|
||||||
|
t.Errorf("proto.SetExtension(m, desc, true): got error %q, want nil", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var g errgroup.Group
|
||||||
|
for n := 3; n > 0; n-- {
|
||||||
|
g.Go(func() error {
|
||||||
|
_, err := proto.Marshal(m)
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
}
|
||||||
|
if err := g.Wait(); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
898
vendor/src/github.com/golang/protobuf/proto/lib.go
vendored
Normal file
@ -0,0 +1,898 @@
|
||||||
|
// Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
//
|
||||||
|
// Copyright 2010 The Go Authors. All rights reserved.
|
||||||
|
// https://github.com/golang/protobuf
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
/*
|
||||||
|
Package proto converts data structures to and from the wire format of
|
||||||
|
protocol buffers. It works in concert with the Go source code generated
|
||||||
|
for .proto files by the protocol compiler.
|
||||||
|
|
||||||
|
A summary of the properties of the protocol buffer interface
|
||||||
|
for a protocol buffer variable v:
|
||||||
|
|
||||||
|
- Names are turned from camel_case to CamelCase for export.
|
||||||
|
- There are no methods on v to set fields; just treat
|
||||||
|
them as structure fields.
|
||||||
|
- There are getters that return a field's value if set,
|
||||||
|
and return the field's default value if unset.
|
||||||
|
The getters work even if the receiver is a nil message.
|
||||||
|
- The zero value for a struct is its correct initialization state.
|
||||||
|
All desired fields must be set before marshaling.
|
||||||
|
- A Reset() method will restore a protobuf struct to its zero state.
|
||||||
|
- Non-repeated fields are pointers to the values; nil means unset.
|
||||||
|
That is, optional or required field int32 f becomes F *int32.
|
||||||
|
- Repeated fields are slices.
|
||||||
|
- Helper functions are available to aid the setting of fields.
|
||||||
|
msg.Foo = proto.String("hello") // set field
|
||||||
|
- Constants are defined to hold the default values of all fields that
|
||||||
|
have them. They have the form Default_StructName_FieldName.
|
||||||
|
Because the getter methods handle defaulted values,
|
||||||
|
direct use of these constants should be rare.
|
||||||
|
- Enums are given type names and maps from names to values.
|
||||||
|
Enum values are prefixed by the enclosing message's name, or by the
|
||||||
|
enum's type name if it is a top-level enum. Enum types have a String
|
||||||
|
method, and a Enum method to assist in message construction.
|
||||||
|
- Nested messages, groups and enums have type names prefixed with the name of
|
||||||
|
the surrounding message type.
|
||||||
|
- Extensions are given descriptor names that start with E_,
|
||||||
|
followed by an underscore-delimited list of the nested messages
|
||||||
|
that contain it (if any) followed by the CamelCased name of the
|
||||||
|
extension field itself. HasExtension, ClearExtension, GetExtension
|
||||||
|
and SetExtension are functions for manipulating extensions.
|
||||||
|
- Oneof field sets are given a single field in their message,
|
||||||
|
with distinguished wrapper types for each possible field value.
|
||||||
|
- Marshal and Unmarshal are functions to encode and decode the wire format.
|
||||||
|
|
||||||
|
When the .proto file specifies `syntax="proto3"`, there are some differences:
|
||||||
|
|
||||||
|
- Non-repeated fields of non-message type are values instead of pointers.
|
||||||
|
- Getters are only generated for message and oneof fields.
|
||||||
|
- Enum types do not get an Enum method.
|
||||||
|
|
||||||
|
The simplest way to describe this is to see an example.
|
||||||
|
Given file test.proto, containing
|
||||||
|
|
||||||
|
package example;
|
||||||
|
|
||||||
|
enum FOO { X = 17; }
|
||||||
|
|
||||||
|
message Test {
|
||||||
|
required string label = 1;
|
||||||
|
optional int32 type = 2 [default=77];
|
||||||
|
repeated int64 reps = 3;
|
||||||
|
optional group OptionalGroup = 4 {
|
||||||
|
required string RequiredField = 5;
|
||||||
|
}
|
||||||
|
oneof union {
|
||||||
|
int32 number = 6;
|
||||||
|
string name = 7;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
The resulting file, test.pb.go, is:
|
||||||
|
|
||||||
|
package example
|
||||||
|
|
||||||
|
import proto "github.com/golang/protobuf/proto"
|
||||||
|
import math "math"
|
||||||
|
|
||||||
|
type FOO int32
|
||||||
|
const (
|
||||||
|
FOO_X FOO = 17
|
||||||
|
)
|
||||||
|
var FOO_name = map[int32]string{
|
||||||
|
17: "X",
|
||||||
|
}
|
||||||
|
var FOO_value = map[string]int32{
|
||||||
|
"X": 17,
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x FOO) Enum() *FOO {
|
||||||
|
p := new(FOO)
|
||||||
|
*p = x
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
func (x FOO) String() string {
|
||||||
|
return proto.EnumName(FOO_name, int32(x))
|
||||||
|
}
|
||||||
|
func (x *FOO) UnmarshalJSON(data []byte) error {
|
||||||
|
value, err := proto.UnmarshalJSONEnum(FOO_value, data)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
*x = FOO(value)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type Test struct {
|
||||||
|
Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
|
||||||
|
Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
|
||||||
|
Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
|
||||||
|
Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
|
||||||
|
// Types that are valid to be assigned to Union:
|
||||||
|
// *Test_Number
|
||||||
|
// *Test_Name
|
||||||
|
Union isTest_Union `protobuf_oneof:"union"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
}
|
||||||
|
func (m *Test) Reset() { *m = Test{} }
|
||||||
|
func (m *Test) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*Test) ProtoMessage() {}
|
||||||
|
|
||||||
|
type isTest_Union interface {
|
||||||
|
isTest_Union()
|
||||||
|
}
|
||||||
|
|
||||||
|
type Test_Number struct {
|
||||||
|
Number int32 `protobuf:"varint,6,opt,name=number"`
|
||||||
|
}
|
||||||
|
type Test_Name struct {
|
||||||
|
Name string `protobuf:"bytes,7,opt,name=name"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*Test_Number) isTest_Union() {}
|
||||||
|
func (*Test_Name) isTest_Union() {}
|
||||||
|
|
||||||
|
func (m *Test) GetUnion() isTest_Union {
|
||||||
|
if m != nil {
|
||||||
|
return m.Union
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
const Default_Test_Type int32 = 77
|
||||||
|
|
||||||
|
func (m *Test) GetLabel() string {
|
||||||
|
if m != nil && m.Label != nil {
|
||||||
|
return *m.Label
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Test) GetType() int32 {
|
||||||
|
if m != nil && m.Type != nil {
|
||||||
|
return *m.Type
|
||||||
|
}
|
||||||
|
return Default_Test_Type
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
|
||||||
|
if m != nil {
|
||||||
|
return m.Optionalgroup
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type Test_OptionalGroup struct {
|
||||||
|
RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
|
||||||
|
}
|
||||||
|
func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
|
||||||
|
func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
|
||||||
|
|
||||||
|
func (m *Test_OptionalGroup) GetRequiredField() string {
|
||||||
|
if m != nil && m.RequiredField != nil {
|
||||||
|
return *m.RequiredField
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Test) GetNumber() int32 {
|
||||||
|
if x, ok := m.GetUnion().(*Test_Number); ok {
|
||||||
|
return x.Number
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Test) GetName() string {
|
||||||
|
if x, ok := m.GetUnion().(*Test_Name); ok {
|
||||||
|
return x.Name
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
|
||||||
|
}
|
||||||
|
|
||||||
|
To create and play with a Test object:
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log"
|
||||||
|
|
||||||
|
"github.com/golang/protobuf/proto"
|
||||||
|
pb "./example.pb"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
test := &pb.Test{
|
||||||
|
Label: proto.String("hello"),
|
||||||
|
Type: proto.Int32(17),
|
||||||
|
Reps: []int64{1, 2, 3},
|
||||||
|
Optionalgroup: &pb.Test_OptionalGroup{
|
||||||
|
RequiredField: proto.String("good bye"),
|
||||||
|
},
|
||||||
|
Union: &pb.Test_Name{"fred"},
|
||||||
|
}
|
||||||
|
data, err := proto.Marshal(test)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal("marshaling error: ", err)
|
||||||
|
}
|
||||||
|
newTest := &pb.Test{}
|
||||||
|
err = proto.Unmarshal(data, newTest)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal("unmarshaling error: ", err)
|
||||||
|
}
|
||||||
|
// Now test and newTest contain the same data.
|
||||||
|
if test.GetLabel() != newTest.GetLabel() {
|
||||||
|
log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
|
||||||
|
}
|
||||||
|
// Use a type switch to determine which oneof was set.
|
||||||
|
switch u := test.Union.(type) {
|
||||||
|
case *pb.Test_Number: // u.Number contains the number.
|
||||||
|
case *pb.Test_Name: // u.Name contains the string.
|
||||||
|
}
|
||||||
|
// etc.
|
||||||
|
}
|
||||||
|
*/
|
||||||
|
package proto

import (
	"encoding/json"
	"fmt"
	"log"
	"reflect"
	"sort"
	"strconv"
	"sync"
)

// Message is implemented by generated protocol buffer messages.
type Message interface {
	Reset()
	String() string
	ProtoMessage()
}
|
||||||
|
|
||||||
|
// Stats records allocation details about the protocol buffer encoders
|
||||||
|
// and decoders. Useful for tuning the library itself.
|
||||||
|
type Stats struct {
|
||||||
|
Emalloc uint64 // mallocs in encode
|
||||||
|
Dmalloc uint64 // mallocs in decode
|
||||||
|
Encode uint64 // number of encodes
|
||||||
|
Decode uint64 // number of decodes
|
||||||
|
Chit uint64 // number of cache hits
|
||||||
|
Cmiss uint64 // number of cache misses
|
||||||
|
Size uint64 // number of sizes
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set to true to enable stats collection.
|
||||||
|
const collectStats = false
|
||||||
|
|
||||||
|
var stats Stats
|
||||||
|
|
||||||
|
// GetStats returns a copy of the global Stats structure.
|
||||||
|
func GetStats() Stats { return stats }
|
||||||
|
|
||||||
|
// A Buffer is a buffer manager for marshaling and unmarshaling
|
||||||
|
// protocol buffers. It may be reused between invocations to
|
||||||
|
// reduce memory usage. It is not necessary to use a Buffer;
|
||||||
|
// the global functions Marshal and Unmarshal create a
|
||||||
|
// temporary Buffer and are fine for most applications.
|
||||||
|
type Buffer struct {
|
||||||
|
buf []byte // encode/decode byte stream
|
||||||
|
index int // read point
|
||||||
|
|
||||||
|
// pools of basic types to amortize allocation.
|
||||||
|
bools []bool
|
||||||
|
uint32s []uint32
|
||||||
|
uint64s []uint64
|
||||||
|
|
||||||
|
// extra pools, only used with pointer_reflect.go
|
||||||
|
int32s []int32
|
||||||
|
int64s []int64
|
||||||
|
float32s []float32
|
||||||
|
float64s []float64
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewBuffer allocates a new Buffer and initializes its internal data to
|
||||||
|
// the contents of the argument slice.
|
||||||
|
func NewBuffer(e []byte) *Buffer {
|
||||||
|
return &Buffer{buf: e}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset resets the Buffer, ready for marshaling a new protocol buffer.
|
||||||
|
func (p *Buffer) Reset() {
|
||||||
|
p.buf = p.buf[0:0] // for reading/writing
|
||||||
|
p.index = 0 // for reading
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetBuf replaces the internal buffer with the slice,
|
||||||
|
// ready for unmarshaling the contents of the slice.
|
||||||
|
func (p *Buffer) SetBuf(s []byte) {
|
||||||
|
p.buf = s
|
||||||
|
p.index = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bytes returns the contents of the Buffer.
|
||||||
|
func (p *Buffer) Bytes() []byte { return p.buf }
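
As a hedged aside (an editorial sketch, not part of the vendored file): the reuse that the Buffer comment above describes usually looks like the loop below, using Buffer.Marshal as the buffer-level counterpart of the package-level Marshal. The msgs slice and marshalAll helper are hypothetical.

// Illustrative sketch: reusing a single Buffer across messages so the backing
// slice is allocated once. msgs is a hypothetical slice of generated messages.
func marshalAll(msgs []Message) ([][]byte, error) {
	var buf Buffer
	out := make([][]byte, 0, len(msgs))
	for _, m := range msgs {
		buf.Reset() // keeps the allocated backing array, drops old contents
		if err := buf.Marshal(m); err != nil {
			return nil, err
		}
		// Bytes aliases the internal buffer, so copy before the next Reset.
		out = append(out, append([]byte(nil), buf.Bytes()...))
	}
	return out, nil
}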
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Helper routines for simplifying the creation of optional fields of basic type.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Bool is a helper routine that allocates a new bool value
|
||||||
|
// to store v and returns a pointer to it.
|
||||||
|
func Bool(v bool) *bool {
|
||||||
|
return &v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Int32 is a helper routine that allocates a new int32 value
|
||||||
|
// to store v and returns a pointer to it.
|
||||||
|
func Int32(v int32) *int32 {
|
||||||
|
return &v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Int is a helper routine that allocates a new int32 value
|
||||||
|
// to store v and returns a pointer to it, but unlike Int32
|
||||||
|
// its argument value is an int.
|
||||||
|
func Int(v int) *int32 {
|
||||||
|
p := new(int32)
|
||||||
|
*p = int32(v)
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
|
||||||
|
// Int64 is a helper routine that allocates a new int64 value
|
||||||
|
// to store v and returns a pointer to it.
|
||||||
|
func Int64(v int64) *int64 {
|
||||||
|
return &v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Float32 is a helper routine that allocates a new float32 value
|
||||||
|
// to store v and returns a pointer to it.
|
||||||
|
func Float32(v float32) *float32 {
|
||||||
|
return &v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Float64 is a helper routine that allocates a new float64 value
|
||||||
|
// to store v and returns a pointer to it.
|
||||||
|
func Float64(v float64) *float64 {
|
||||||
|
return &v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Uint32 is a helper routine that allocates a new uint32 value
|
||||||
|
// to store v and returns a pointer to it.
|
||||||
|
func Uint32(v uint32) *uint32 {
|
||||||
|
return &v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Uint64 is a helper routine that allocates a new uint64 value
|
||||||
|
// to store v and returns a pointer to it.
|
||||||
|
func Uint64(v uint64) *uint64 {
|
||||||
|
return &v
|
||||||
|
}
|
||||||
|
|
||||||
|
// String is a helper routine that allocates a new string value
|
||||||
|
// to store v and returns a pointer to it.
|
||||||
|
func String(v string) *string {
|
||||||
|
return &v
|
||||||
|
}
|
||||||
|
|
||||||
|
// EnumName is a helper function to simplify printing protocol buffer enums
// by name. Given an enum map and a value, it returns a useful string.
func EnumName(m map[int32]string, v int32) string {
	s, ok := m[v]
	if ok {
		return s
	}
	return strconv.Itoa(int(v))
}

// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
// from their JSON-encoded representation. Given a map from the enum's symbolic
// names to its int values, and a byte buffer containing the JSON-encoded
// value, it returns an int32 that can be cast to the enum type by the caller.
//
// The function can deal with both JSON representations, numeric and symbolic.
func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
	if data[0] == '"' {
		// New style: enums are strings.
		var repr string
		if err := json.Unmarshal(data, &repr); err != nil {
			return -1, err
		}
		val, ok := m[repr]
		if !ok {
			return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
		}
		return val, nil
	}
	// Old style: enums are ints.
	var val int32
	if err := json.Unmarshal(data, &val); err != nil {
		return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
	}
	return val, nil
}
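
A hedged usage sketch (editorial, assuming the generated example package from the documentation comment earlier in this file): a generated enum's UnmarshalJSON method calls this helper roughly as follows, and the three-argument call accepts both the symbolic and the numeric JSON form.

// Illustrative sketch: how a generated enum type decodes both JSON forms.
// FOO and FOO_value are the generated type and name→value map from the
// package documentation example above.
func (x *FOO) UnmarshalJSON(data []byte) error {
	value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO") // accepts `"X"` or `17`
	if err != nil {
		return err
	}
	*x = FOO(value)
	return nil
}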
|
||||||
|
|
||||||
|
// DebugPrint dumps the encoded data in b in a debugging format with a header
|
||||||
|
// including the string s. Used in testing but made available for general debugging.
|
||||||
|
func (p *Buffer) DebugPrint(s string, b []byte) {
|
||||||
|
var u uint64
|
||||||
|
|
||||||
|
obuf := p.buf
|
||||||
|
index := p.index
|
||||||
|
p.buf = b
|
||||||
|
p.index = 0
|
||||||
|
depth := 0
|
||||||
|
|
||||||
|
fmt.Printf("\n--- %s ---\n", s)
|
||||||
|
|
||||||
|
out:
|
||||||
|
for {
|
||||||
|
for i := 0; i < depth; i++ {
|
||||||
|
fmt.Print(" ")
|
||||||
|
}
|
||||||
|
|
||||||
|
index := p.index
|
||||||
|
if index == len(p.buf) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
op, err := p.DecodeVarint()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("%3d: fetching op err %v\n", index, err)
|
||||||
|
break out
|
||||||
|
}
|
||||||
|
tag := op >> 3
|
||||||
|
wire := op & 7
|
||||||
|
|
||||||
|
switch wire {
|
||||||
|
default:
|
||||||
|
fmt.Printf("%3d: t=%3d unknown wire=%d\n",
|
||||||
|
index, tag, wire)
|
||||||
|
break out
|
||||||
|
|
||||||
|
case WireBytes:
|
||||||
|
var r []byte
|
||||||
|
|
||||||
|
r, err = p.DecodeRawBytes(false)
|
||||||
|
if err != nil {
|
||||||
|
break out
|
||||||
|
}
|
||||||
|
fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
|
||||||
|
if len(r) <= 6 {
|
||||||
|
for i := 0; i < len(r); i++ {
|
||||||
|
fmt.Printf(" %.2x", r[i])
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
for i := 0; i < 3; i++ {
|
||||||
|
fmt.Printf(" %.2x", r[i])
|
||||||
|
}
|
||||||
|
fmt.Printf(" ..")
|
||||||
|
for i := len(r) - 3; i < len(r); i++ {
|
||||||
|
fmt.Printf(" %.2x", r[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fmt.Printf("\n")
|
||||||
|
|
||||||
|
case WireFixed32:
|
||||||
|
u, err = p.DecodeFixed32()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
|
||||||
|
break out
|
||||||
|
}
|
||||||
|
fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
|
||||||
|
|
||||||
|
case WireFixed64:
|
||||||
|
u, err = p.DecodeFixed64()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
|
||||||
|
break out
|
||||||
|
}
|
||||||
|
fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
|
||||||
|
|
||||||
|
case WireVarint:
|
||||||
|
u, err = p.DecodeVarint()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
|
||||||
|
break out
|
||||||
|
}
|
||||||
|
fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
|
||||||
|
|
||||||
|
case WireStartGroup:
|
||||||
|
fmt.Printf("%3d: t=%3d start\n", index, tag)
|
||||||
|
depth++
|
||||||
|
|
||||||
|
case WireEndGroup:
|
||||||
|
depth--
|
||||||
|
fmt.Printf("%3d: t=%3d end\n", index, tag)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if depth != 0 {
|
||||||
|
fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
|
||||||
|
}
|
||||||
|
fmt.Printf("\n")
|
||||||
|
|
||||||
|
p.buf = obuf
|
||||||
|
p.index = index
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetDefaults sets unset protocol buffer fields to their default values.
// It only modifies fields that are both unset and have defined defaults.
// It recursively sets default values in any non-nil sub-messages.
func SetDefaults(pb Message) {
	setDefaults(reflect.ValueOf(pb), true, false)
}
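
A hedged usage sketch (editorial, reusing the Test message from the package documentation example above, whose `type` field declares default=77):

// Illustrative sketch: SetDefaults fills unset scalar fields that declare
// defaults and leaves already-set fields alone. pb.Test and
// Default_Test_Type come from the generated example package shown earlier.
func withDefaults() *pb.Test {
	msg := &pb.Test{Label: proto.String("x")} // Type deliberately left unset
	proto.SetDefaults(msg)
	// *msg.Type is now 77 (Default_Test_Type).
	return msg
}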
|
||||||
|
|
||||||
|
// v is a pointer to a struct.
|
||||||
|
func setDefaults(v reflect.Value, recur, zeros bool) {
|
||||||
|
v = v.Elem()
|
||||||
|
|
||||||
|
defaultMu.RLock()
|
||||||
|
dm, ok := defaults[v.Type()]
|
||||||
|
defaultMu.RUnlock()
|
||||||
|
if !ok {
|
||||||
|
dm = buildDefaultMessage(v.Type())
|
||||||
|
defaultMu.Lock()
|
||||||
|
defaults[v.Type()] = dm
|
||||||
|
defaultMu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, sf := range dm.scalars {
|
||||||
|
f := v.Field(sf.index)
|
||||||
|
if !f.IsNil() {
|
||||||
|
// field already set
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
dv := sf.value
|
||||||
|
if dv == nil && !zeros {
|
||||||
|
// no explicit default, and don't want to set zeros
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fptr := f.Addr().Interface() // **T
|
||||||
|
// TODO: Consider batching the allocations we do here.
|
||||||
|
switch sf.kind {
|
||||||
|
case reflect.Bool:
|
||||||
|
b := new(bool)
|
||||||
|
if dv != nil {
|
||||||
|
*b = dv.(bool)
|
||||||
|
}
|
||||||
|
*(fptr.(**bool)) = b
|
||||||
|
case reflect.Float32:
|
||||||
|
f := new(float32)
|
||||||
|
if dv != nil {
|
||||||
|
*f = dv.(float32)
|
||||||
|
}
|
||||||
|
*(fptr.(**float32)) = f
|
||||||
|
case reflect.Float64:
|
||||||
|
f := new(float64)
|
||||||
|
if dv != nil {
|
||||||
|
*f = dv.(float64)
|
||||||
|
}
|
||||||
|
*(fptr.(**float64)) = f
|
||||||
|
case reflect.Int32:
|
||||||
|
// might be an enum
|
||||||
|
if ft := f.Type(); ft != int32PtrType {
|
||||||
|
// enum
|
||||||
|
f.Set(reflect.New(ft.Elem()))
|
||||||
|
if dv != nil {
|
||||||
|
f.Elem().SetInt(int64(dv.(int32)))
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// int32 field
|
||||||
|
i := new(int32)
|
||||||
|
if dv != nil {
|
||||||
|
*i = dv.(int32)
|
||||||
|
}
|
||||||
|
*(fptr.(**int32)) = i
|
||||||
|
}
|
||||||
|
case reflect.Int64:
|
||||||
|
i := new(int64)
|
||||||
|
if dv != nil {
|
||||||
|
*i = dv.(int64)
|
||||||
|
}
|
||||||
|
*(fptr.(**int64)) = i
|
||||||
|
case reflect.String:
|
||||||
|
s := new(string)
|
||||||
|
if dv != nil {
|
||||||
|
*s = dv.(string)
|
||||||
|
}
|
||||||
|
*(fptr.(**string)) = s
|
||||||
|
case reflect.Uint8:
|
||||||
|
// exceptional case: []byte
|
||||||
|
var b []byte
|
||||||
|
if dv != nil {
|
||||||
|
db := dv.([]byte)
|
||||||
|
b = make([]byte, len(db))
|
||||||
|
copy(b, db)
|
||||||
|
} else {
|
||||||
|
b = []byte{}
|
||||||
|
}
|
||||||
|
*(fptr.(*[]byte)) = b
|
||||||
|
case reflect.Uint32:
|
||||||
|
u := new(uint32)
|
||||||
|
if dv != nil {
|
||||||
|
*u = dv.(uint32)
|
||||||
|
}
|
||||||
|
*(fptr.(**uint32)) = u
|
||||||
|
case reflect.Uint64:
|
||||||
|
u := new(uint64)
|
||||||
|
if dv != nil {
|
||||||
|
*u = dv.(uint64)
|
||||||
|
}
|
||||||
|
*(fptr.(**uint64)) = u
|
||||||
|
default:
|
||||||
|
log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, ni := range dm.nested {
|
||||||
|
f := v.Field(ni)
|
||||||
|
// f is *T or []*T or map[T]*T
|
||||||
|
switch f.Kind() {
|
||||||
|
case reflect.Ptr:
|
||||||
|
if f.IsNil() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
setDefaults(f, recur, zeros)
|
||||||
|
|
||||||
|
case reflect.Slice:
|
||||||
|
for i := 0; i < f.Len(); i++ {
|
||||||
|
e := f.Index(i)
|
||||||
|
if e.IsNil() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
setDefaults(e, recur, zeros)
|
||||||
|
}
|
||||||
|
|
||||||
|
case reflect.Map:
|
||||||
|
for _, k := range f.MapKeys() {
|
||||||
|
e := f.MapIndex(k)
|
||||||
|
if e.IsNil() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
setDefaults(e, recur, zeros)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
// defaults maps a protocol buffer struct type to a slice of the fields,
|
||||||
|
// with its scalar fields set to their proto-declared non-zero default values.
|
||||||
|
defaultMu sync.RWMutex
|
||||||
|
defaults = make(map[reflect.Type]defaultMessage)
|
||||||
|
|
||||||
|
int32PtrType = reflect.TypeOf((*int32)(nil))
|
||||||
|
)
|
||||||
|
|
||||||
|
// defaultMessage represents information about the default values of a message.
|
||||||
|
type defaultMessage struct {
|
||||||
|
scalars []scalarField
|
||||||
|
nested []int // struct field index of nested messages
|
||||||
|
}
|
||||||
|
|
||||||
|
type scalarField struct {
|
||||||
|
index int // struct field index
|
||||||
|
kind reflect.Kind // element type (the T in *T or []T)
|
||||||
|
value interface{} // the proto-declared default value, or nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// t is a struct type.
|
||||||
|
func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
|
||||||
|
sprop := GetProperties(t)
|
||||||
|
for _, prop := range sprop.Prop {
|
||||||
|
fi, ok := sprop.decoderTags.get(prop.Tag)
|
||||||
|
if !ok {
|
||||||
|
// XXX_unrecognized
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
ft := t.Field(fi).Type
|
||||||
|
|
||||||
|
sf, nested, err := fieldDefault(ft, prop)
|
||||||
|
switch {
|
||||||
|
case err != nil:
|
||||||
|
log.Print(err)
|
||||||
|
case nested:
|
||||||
|
dm.nested = append(dm.nested, fi)
|
||||||
|
case sf != nil:
|
||||||
|
sf.index = fi
|
||||||
|
dm.scalars = append(dm.scalars, *sf)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return dm
|
||||||
|
}
|
||||||
|
|
||||||
|
// fieldDefault returns the scalarField for field type ft.
|
||||||
|
// sf will be nil if the field can not have a default.
|
||||||
|
// nestedMessage will be true if this is a nested message.
|
||||||
|
// Note that sf.index is not set on return.
|
||||||
|
func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
|
||||||
|
var canHaveDefault bool
|
||||||
|
switch ft.Kind() {
|
||||||
|
case reflect.Ptr:
|
||||||
|
if ft.Elem().Kind() == reflect.Struct {
|
||||||
|
nestedMessage = true
|
||||||
|
} else {
|
||||||
|
canHaveDefault = true // proto2 scalar field
|
||||||
|
}
|
||||||
|
|
||||||
|
case reflect.Slice:
|
||||||
|
switch ft.Elem().Kind() {
|
||||||
|
case reflect.Ptr:
|
||||||
|
nestedMessage = true // repeated message
|
||||||
|
case reflect.Uint8:
|
||||||
|
canHaveDefault = true // bytes field
|
||||||
|
}
|
||||||
|
|
||||||
|
case reflect.Map:
|
||||||
|
if ft.Elem().Kind() == reflect.Ptr {
|
||||||
|
nestedMessage = true // map with message values
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !canHaveDefault {
|
||||||
|
if nestedMessage {
|
||||||
|
return nil, true, nil
|
||||||
|
}
|
||||||
|
return nil, false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// We now know that ft is a pointer or slice.
|
||||||
|
sf = &scalarField{kind: ft.Elem().Kind()}
|
||||||
|
|
||||||
|
// scalar fields without defaults
|
||||||
|
if !prop.HasDefault {
|
||||||
|
return sf, false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// a scalar field: either *T or []byte
|
||||||
|
switch ft.Elem().Kind() {
|
||||||
|
case reflect.Bool:
|
||||||
|
x, err := strconv.ParseBool(prop.Default)
|
||||||
|
if err != nil {
|
||||||
|
            return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
        }
        sf.value = x
    case reflect.Float32:
        x, err := strconv.ParseFloat(prop.Default, 32)
        if err != nil {
            return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
        }
        sf.value = float32(x)
    case reflect.Float64:
        x, err := strconv.ParseFloat(prop.Default, 64)
        if err != nil {
            return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
        }
        sf.value = x
    case reflect.Int32:
        x, err := strconv.ParseInt(prop.Default, 10, 32)
        if err != nil {
            return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
        }
        sf.value = int32(x)
    case reflect.Int64:
        x, err := strconv.ParseInt(prop.Default, 10, 64)
        if err != nil {
            return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
        }
        sf.value = x
    case reflect.String:
        sf.value = prop.Default
    case reflect.Uint8:
        // []byte (not *uint8)
        sf.value = []byte(prop.Default)
    case reflect.Uint32:
        x, err := strconv.ParseUint(prop.Default, 10, 32)
        if err != nil {
            return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
        }
        sf.value = uint32(x)
    case reflect.Uint64:
        x, err := strconv.ParseUint(prop.Default, 10, 64)
        if err != nil {
            return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
        }
        sf.value = x
    default:
        return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
    }

    return sf, false, nil
}

// Map fields may have key types of non-float scalars, strings and enums.
// The easiest way to sort them in some deterministic order is to use fmt.
// If this turns out to be inefficient we can always consider other options,
// such as doing a Schwartzian transform.

func mapKeys(vs []reflect.Value) sort.Interface {
    s := mapKeySorter{
        vs: vs,
        // default Less function: textual comparison
        less: func(a, b reflect.Value) bool {
            return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
        },
    }

    // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;
    // numeric keys are sorted numerically.
    if len(vs) == 0 {
        return s
    }
    switch vs[0].Kind() {
    case reflect.Int32, reflect.Int64:
        s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
    case reflect.Uint32, reflect.Uint64:
        s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
    }

    return s
}

type mapKeySorter struct {
    vs   []reflect.Value
    less func(a, b reflect.Value) bool
}

func (s mapKeySorter) Len() int      { return len(s.vs) }
func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
func (s mapKeySorter) Less(i, j int) bool {
    return s.less(s.vs[i], s.vs[j])
}

// isProto3Zero reports whether v is a zero proto3 value.
func isProto3Zero(v reflect.Value) bool {
    switch v.Kind() {
    case reflect.Bool:
        return !v.Bool()
    case reflect.Int32, reflect.Int64:
        return v.Int() == 0
    case reflect.Uint32, reflect.Uint64:
        return v.Uint() == 0
    case reflect.Float32, reflect.Float64:
        return v.Float() == 0
    case reflect.String:
        return v.String() == ""
    }
    return false
}

// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
// to assert that that code is compatible with this version of the proto package.
const ProtoPackageIsVersion2 = true

// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
// to assert that that code is compatible with this version of the proto package.
const ProtoPackageIsVersion1 = true

46 vendor/src/github.com/golang/protobuf/proto/map_test.go vendored Normal file
@@ -0,0 +1,46 @@
package proto_test

import (
    "fmt"
    "testing"

    "github.com/golang/protobuf/proto"
    ppb "github.com/golang/protobuf/proto/proto3_proto"
)

func marshalled() []byte {
    m := &ppb.IntMaps{}
    for i := 0; i < 1000; i++ {
        m.Maps = append(m.Maps, &ppb.IntMap{
            Rtt: map[int32]int32{1: 2},
        })
    }
    b, err := proto.Marshal(m)
    if err != nil {
        panic(fmt.Sprintf("Can't marshal %+v: %v", m, err))
    }
    return b
}

func BenchmarkConcurrentMapUnmarshal(b *testing.B) {
    in := marshalled()
    b.RunParallel(func(pb *testing.PB) {
        for pb.Next() {
            var out ppb.IntMaps
            if err := proto.Unmarshal(in, &out); err != nil {
                b.Errorf("Can't unmarshal ppb.IntMaps: %v", err)
            }
        }
    })
}

func BenchmarkSequentialMapUnmarshal(b *testing.B) {
    in := marshalled()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        var out ppb.IntMaps
        if err := proto.Unmarshal(in, &out); err != nil {
            b.Errorf("Can't unmarshal ppb.IntMaps: %v", err)
        }
    }
}

311 vendor/src/github.com/golang/protobuf/proto/message_set.go vendored Normal file
@@ -0,0 +1,311 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package proto

/*
 * Support for message sets.
 */

import (
    "bytes"
    "encoding/json"
    "errors"
    "fmt"
    "reflect"
    "sort"
)

// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
// A message type ID is required for storing a protocol buffer in a message set.
var errNoMessageTypeID = errors.New("proto does not have a message type ID")

// The first two types (_MessageSet_Item and messageSet)
// model what the protocol compiler produces for the following protocol message:
//   message MessageSet {
//     repeated group Item = 1 {
//       required int32 type_id = 2;
//       required string message = 3;
//     };
//   }
// That is the MessageSet wire format. We can't use a proto to generate these
// because that would introduce a circular dependency between it and this package.

type _MessageSet_Item struct {
    TypeId  *int32 `protobuf:"varint,2,req,name=type_id"`
    Message []byte `protobuf:"bytes,3,req,name=message"`
}

type messageSet struct {
    Item             []*_MessageSet_Item `protobuf:"group,1,rep"`
    XXX_unrecognized []byte
    // TODO: caching?
}

// Make sure messageSet is a Message.
var _ Message = (*messageSet)(nil)

// messageTypeIder is an interface satisfied by a protocol buffer type
// that may be stored in a MessageSet.
type messageTypeIder interface {
    MessageTypeId() int32
}

func (ms *messageSet) find(pb Message) *_MessageSet_Item {
    mti, ok := pb.(messageTypeIder)
    if !ok {
        return nil
    }
    id := mti.MessageTypeId()
    for _, item := range ms.Item {
        if *item.TypeId == id {
            return item
        }
    }
    return nil
}

func (ms *messageSet) Has(pb Message) bool {
    if ms.find(pb) != nil {
        return true
    }
    return false
}

func (ms *messageSet) Unmarshal(pb Message) error {
    if item := ms.find(pb); item != nil {
        return Unmarshal(item.Message, pb)
    }
    if _, ok := pb.(messageTypeIder); !ok {
        return errNoMessageTypeID
    }
    return nil // TODO: return error instead?
}

func (ms *messageSet) Marshal(pb Message) error {
    msg, err := Marshal(pb)
    if err != nil {
        return err
    }
    if item := ms.find(pb); item != nil {
        // reuse existing item
        item.Message = msg
        return nil
    }

    mti, ok := pb.(messageTypeIder)
    if !ok {
        return errNoMessageTypeID
    }

    mtid := mti.MessageTypeId()
    ms.Item = append(ms.Item, &_MessageSet_Item{
        TypeId:  &mtid,
        Message: msg,
    })
    return nil
}

func (ms *messageSet) Reset()         { *ms = messageSet{} }
func (ms *messageSet) String() string { return CompactTextString(ms) }
func (*messageSet) ProtoMessage()     {}

// Support for the message_set_wire_format message option.

func skipVarint(buf []byte) []byte {
    i := 0
    for ; buf[i]&0x80 != 0; i++ {
    }
    return buf[i+1:]
}

// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
func MarshalMessageSet(exts interface{}) ([]byte, error) {
    var m map[int32]Extension
    switch exts := exts.(type) {
    case *XXX_InternalExtensions:
        if err := encodeExtensions(exts); err != nil {
            return nil, err
        }
        m, _ = exts.extensionsRead()
    case map[int32]Extension:
        if err := encodeExtensionsMap(exts); err != nil {
            return nil, err
        }
        m = exts
    default:
        return nil, errors.New("proto: not an extension map")
    }

    // Sort extension IDs to provide a deterministic encoding.
    // See also enc_map in encode.go.
    ids := make([]int, 0, len(m))
    for id := range m {
        ids = append(ids, int(id))
    }
    sort.Ints(ids)

    ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
    for _, id := range ids {
        e := m[int32(id)]
        // Remove the wire type and field number varint, as well as the length varint.
        msg := skipVarint(skipVarint(e.enc))

        ms.Item = append(ms.Item, &_MessageSet_Item{
            TypeId:  Int32(int32(id)),
            Message: msg,
        })
    }
    return Marshal(ms)
}

// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
func UnmarshalMessageSet(buf []byte, exts interface{}) error {
    var m map[int32]Extension
    switch exts := exts.(type) {
    case *XXX_InternalExtensions:
        m = exts.extensionsWrite()
    case map[int32]Extension:
        m = exts
    default:
        return errors.New("proto: not an extension map")
    }

    ms := new(messageSet)
    if err := Unmarshal(buf, ms); err != nil {
        return err
    }
    for _, item := range ms.Item {
        id := *item.TypeId
        msg := item.Message

        // Restore wire type and field number varint, plus length varint.
        // Be careful to preserve duplicate items.
        b := EncodeVarint(uint64(id)<<3 | WireBytes)
        if ext, ok := m[id]; ok {
            // Existing data; rip off the tag and length varint
            // so we join the new data correctly.
            // We can assume that ext.enc is set because we are unmarshaling.
            o := ext.enc[len(b):]   // skip wire type and field number
            _, n := DecodeVarint(o) // calculate length of length varint
            o = o[n:]               // skip length varint
            msg = append(o, msg...) // join old data and new data
        }
        b = append(b, EncodeVarint(uint64(len(msg)))...)
        b = append(b, msg...)

        m[id] = Extension{enc: b}
    }
    return nil
}

// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
    var m map[int32]Extension
    switch exts := exts.(type) {
    case *XXX_InternalExtensions:
        m, _ = exts.extensionsRead()
    case map[int32]Extension:
        m = exts
    default:
        return nil, errors.New("proto: not an extension map")
    }
    var b bytes.Buffer
    b.WriteByte('{')

    // Process the map in key order for deterministic output.
    ids := make([]int32, 0, len(m))
    for id := range m {
        ids = append(ids, id)
    }
    sort.Sort(int32Slice(ids)) // int32Slice defined in text.go

    for i, id := range ids {
        ext := m[id]
        if i > 0 {
            b.WriteByte(',')
        }

        msd, ok := messageSetMap[id]
        if !ok {
            // Unknown type; we can't render it, so skip it.
            continue
        }
        fmt.Fprintf(&b, `"[%s]":`, msd.name)

        x := ext.value
        if x == nil {
            x = reflect.New(msd.t.Elem()).Interface()
            if err := Unmarshal(ext.enc, x.(Message)); err != nil {
                return nil, err
            }
        }
        d, err := json.Marshal(x)
        if err != nil {
            return nil, err
        }
        b.Write(d)
    }
    b.WriteByte('}')
    return b.Bytes(), nil
}

// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
    // Common-case fast path.
    if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
        return nil
    }

    // This is fairly tricky, and it's not clear that it is needed.
    return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
}

// A global registry of types that can be used in a MessageSet.

var messageSetMap = make(map[int32]messageSetDesc)

type messageSetDesc struct {
    t    reflect.Type // pointer to struct
    name string
}

// RegisterMessageSetType is called from the generated code.
func RegisterMessageSetType(m Message, fieldNum int32, name string) {
    messageSetMap[fieldNum] = messageSetDesc{
        t:    reflect.TypeOf(m),
        name: name,
    }
}

66 vendor/src/github.com/golang/protobuf/proto/message_set_test.go vendored Normal file
@@ -0,0 +1,66 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2014 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package proto

import (
    "bytes"
    "testing"
)

func TestUnmarshalMessageSetWithDuplicate(t *testing.T) {
    // Check that a repeated message set entry will be concatenated.
    in := &messageSet{
        Item: []*_MessageSet_Item{
            {TypeId: Int32(12345), Message: []byte("hoo")},
            {TypeId: Int32(12345), Message: []byte("hah")},
        },
    }
    b, err := Marshal(in)
    if err != nil {
        t.Fatalf("Marshal: %v", err)
    }
    t.Logf("Marshaled bytes: %q", b)

    var extensions XXX_InternalExtensions
    if err := UnmarshalMessageSet(b, &extensions); err != nil {
        t.Fatalf("UnmarshalMessageSet: %v", err)
    }
    ext, ok := extensions.p.extensionMap[12345]
    if !ok {
        t.Fatalf("Didn't retrieve extension 12345; map is %v", extensions.p.extensionMap)
    }
    // Skip wire type/field number and length varints.
    got := skipVarint(skipVarint(ext.enc))
    if want := []byte("hoohah"); !bytes.Equal(got, want) {
        t.Errorf("Combined extension is %q, want %q", got, want)
    }
}

484 vendor/src/github.com/golang/protobuf/proto/pointer_reflect.go vendored Normal file
@@ -0,0 +1,484 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2012 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// +build appengine js

// This file contains an implementation of proto field accesses using package reflect.
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
// be used on App Engine.

package proto

import (
    "math"
    "reflect"
)

// A structPointer is a pointer to a struct.
type structPointer struct {
    v reflect.Value
}

// toStructPointer returns a structPointer equivalent to the given reflect value.
// The reflect value must itself be a pointer to a struct.
func toStructPointer(v reflect.Value) structPointer {
    return structPointer{v}
}

// IsNil reports whether p is nil.
func structPointer_IsNil(p structPointer) bool {
    return p.v.IsNil()
}

// Interface returns the struct pointer as an interface value.
func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
    return p.v.Interface()
}

// A field identifies a field in a struct, accessible from a structPointer.
// In this implementation, a field is identified by the sequence of field indices
// passed to reflect's FieldByIndex.
type field []int

// toField returns a field equivalent to the given reflect field.
func toField(f *reflect.StructField) field {
    return f.Index
}

// invalidField is an invalid field identifier.
var invalidField = field(nil)

// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool { return f != nil }

// field returns the given field in the struct as a reflect value.
func structPointer_field(p structPointer, f field) reflect.Value {
    // Special case: an extension map entry with a value of type T
    // passes a *T to the struct-handling code with a zero field,
    // expecting that it will be treated as equivalent to *struct{ X T },
    // which has the same memory layout. We have to handle that case
    // specially, because reflect will panic if we call FieldByIndex on a
    // non-struct.
    if f == nil {
        return p.v.Elem()
    }

    return p.v.Elem().FieldByIndex(f)
}

// ifield returns the given field in the struct as an interface value.
func structPointer_ifield(p structPointer, f field) interface{} {
    return structPointer_field(p, f).Addr().Interface()
}

// Bytes returns the address of a []byte field in the struct.
func structPointer_Bytes(p structPointer, f field) *[]byte {
    return structPointer_ifield(p, f).(*[]byte)
}

// BytesSlice returns the address of a [][]byte field in the struct.
func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
    return structPointer_ifield(p, f).(*[][]byte)
}

// Bool returns the address of a *bool field in the struct.
func structPointer_Bool(p structPointer, f field) **bool {
    return structPointer_ifield(p, f).(**bool)
}

// BoolVal returns the address of a bool field in the struct.
func structPointer_BoolVal(p structPointer, f field) *bool {
    return structPointer_ifield(p, f).(*bool)
}

// BoolSlice returns the address of a []bool field in the struct.
func structPointer_BoolSlice(p structPointer, f field) *[]bool {
    return structPointer_ifield(p, f).(*[]bool)
}

// String returns the address of a *string field in the struct.
func structPointer_String(p structPointer, f field) **string {
    return structPointer_ifield(p, f).(**string)
}

// StringVal returns the address of a string field in the struct.
func structPointer_StringVal(p structPointer, f field) *string {
    return structPointer_ifield(p, f).(*string)
}

// StringSlice returns the address of a []string field in the struct.
func structPointer_StringSlice(p structPointer, f field) *[]string {
    return structPointer_ifield(p, f).(*[]string)
}

// Extensions returns the address of an extension map field in the struct.
func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
    return structPointer_ifield(p, f).(*XXX_InternalExtensions)
}

// ExtMap returns the address of an extension map field in the struct.
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
    return structPointer_ifield(p, f).(*map[int32]Extension)
}

// NewAt returns the reflect.Value for a pointer to a field in the struct.
func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
    return structPointer_field(p, f).Addr()
}

// SetStructPointer writes a *struct field in the struct.
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
    structPointer_field(p, f).Set(q.v)
}

// GetStructPointer reads a *struct field in the struct.
func structPointer_GetStructPointer(p structPointer, f field) structPointer {
    return structPointer{structPointer_field(p, f)}
}

// StructPointerSlice the address of a []*struct field in the struct.
func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
    return structPointerSlice{structPointer_field(p, f)}
}

// A structPointerSlice represents the address of a slice of pointers to structs
// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
type structPointerSlice struct {
    v reflect.Value
}

func (p structPointerSlice) Len() int                  { return p.v.Len() }
func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
func (p structPointerSlice) Append(q structPointer) {
    p.v.Set(reflect.Append(p.v, q.v))
}

var (
    int32Type   = reflect.TypeOf(int32(0))
    uint32Type  = reflect.TypeOf(uint32(0))
    float32Type = reflect.TypeOf(float32(0))
    int64Type   = reflect.TypeOf(int64(0))
    uint64Type  = reflect.TypeOf(uint64(0))
    float64Type = reflect.TypeOf(float64(0))
)

// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
type word32 struct {
    v reflect.Value
}

// IsNil reports whether p is nil.
func word32_IsNil(p word32) bool {
    return p.v.IsNil()
}

// Set sets p to point at a newly allocated word with bits set to x.
func word32_Set(p word32, o *Buffer, x uint32) {
    t := p.v.Type().Elem()
    switch t {
    case int32Type:
        if len(o.int32s) == 0 {
            o.int32s = make([]int32, uint32PoolSize)
        }
        o.int32s[0] = int32(x)
        p.v.Set(reflect.ValueOf(&o.int32s[0]))
        o.int32s = o.int32s[1:]
        return
    case uint32Type:
        if len(o.uint32s) == 0 {
            o.uint32s = make([]uint32, uint32PoolSize)
        }
        o.uint32s[0] = x
        p.v.Set(reflect.ValueOf(&o.uint32s[0]))
        o.uint32s = o.uint32s[1:]
        return
    case float32Type:
        if len(o.float32s) == 0 {
            o.float32s = make([]float32, uint32PoolSize)
        }
        o.float32s[0] = math.Float32frombits(x)
        p.v.Set(reflect.ValueOf(&o.float32s[0]))
        o.float32s = o.float32s[1:]
        return
    }

    // must be enum
    p.v.Set(reflect.New(t))
    p.v.Elem().SetInt(int64(int32(x)))
}

// Get gets the bits pointed at by p, as a uint32.
func word32_Get(p word32) uint32 {
    elem := p.v.Elem()
    switch elem.Kind() {
    case reflect.Int32:
        return uint32(elem.Int())
    case reflect.Uint32:
        return uint32(elem.Uint())
    case reflect.Float32:
        return math.Float32bits(float32(elem.Float()))
    }
    panic("unreachable")
}

// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
func structPointer_Word32(p structPointer, f field) word32 {
    return word32{structPointer_field(p, f)}
}

// A word32Val represents a field of type int32, uint32, float32, or enum.
// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
type word32Val struct {
    v reflect.Value
}

// Set sets *p to x.
func word32Val_Set(p word32Val, x uint32) {
    switch p.v.Type() {
    case int32Type:
        p.v.SetInt(int64(x))
        return
    case uint32Type:
        p.v.SetUint(uint64(x))
        return
    case float32Type:
        p.v.SetFloat(float64(math.Float32frombits(x)))
        return
    }

    // must be enum
    p.v.SetInt(int64(int32(x)))
}

// Get gets the bits pointed at by p, as a uint32.
func word32Val_Get(p word32Val) uint32 {
    elem := p.v
    switch elem.Kind() {
    case reflect.Int32:
        return uint32(elem.Int())
    case reflect.Uint32:
        return uint32(elem.Uint())
    case reflect.Float32:
        return math.Float32bits(float32(elem.Float()))
    }
    panic("unreachable")
}

// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct.
func structPointer_Word32Val(p structPointer, f field) word32Val {
    return word32Val{structPointer_field(p, f)}
}

// A word32Slice is a slice of 32-bit values.
// That is, v.Type() is []int32, []uint32, []float32, or []enum.
type word32Slice struct {
    v reflect.Value
}

func (p word32Slice) Append(x uint32) {
    n, m := p.v.Len(), p.v.Cap()
    if n < m {
        p.v.SetLen(n + 1)
    } else {
        t := p.v.Type().Elem()
        p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
    }
    elem := p.v.Index(n)
    switch elem.Kind() {
    case reflect.Int32:
        elem.SetInt(int64(int32(x)))
    case reflect.Uint32:
        elem.SetUint(uint64(x))
    case reflect.Float32:
        elem.SetFloat(float64(math.Float32frombits(x)))
    }
}

func (p word32Slice) Len() int {
    return p.v.Len()
}

func (p word32Slice) Index(i int) uint32 {
    elem := p.v.Index(i)
    switch elem.Kind() {
    case reflect.Int32:
        return uint32(elem.Int())
    case reflect.Uint32:
        return uint32(elem.Uint())
    case reflect.Float32:
        return math.Float32bits(float32(elem.Float()))
    }
    panic("unreachable")
}

// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
func structPointer_Word32Slice(p structPointer, f field) word32Slice {
    return word32Slice{structPointer_field(p, f)}
}

// word64 is like word32 but for 64-bit values.
type word64 struct {
    v reflect.Value
}

func word64_Set(p word64, o *Buffer, x uint64) {
    t := p.v.Type().Elem()
    switch t {
    case int64Type:
        if len(o.int64s) == 0 {
            o.int64s = make([]int64, uint64PoolSize)
        }
        o.int64s[0] = int64(x)
        p.v.Set(reflect.ValueOf(&o.int64s[0]))
        o.int64s = o.int64s[1:]
        return
    case uint64Type:
        if len(o.uint64s) == 0 {
            o.uint64s = make([]uint64, uint64PoolSize)
        }
        o.uint64s[0] = x
        p.v.Set(reflect.ValueOf(&o.uint64s[0]))
        o.uint64s = o.uint64s[1:]
        return
    case float64Type:
        if len(o.float64s) == 0 {
            o.float64s = make([]float64, uint64PoolSize)
        }
        o.float64s[0] = math.Float64frombits(x)
        p.v.Set(reflect.ValueOf(&o.float64s[0]))
        o.float64s = o.float64s[1:]
        return
    }
    panic("unreachable")
}

func word64_IsNil(p word64) bool {
    return p.v.IsNil()
}

func word64_Get(p word64) uint64 {
    elem := p.v.Elem()
    switch elem.Kind() {
    case reflect.Int64:
        return uint64(elem.Int())
    case reflect.Uint64:
        return elem.Uint()
    case reflect.Float64:
        return math.Float64bits(elem.Float())
    }
    panic("unreachable")
}

func structPointer_Word64(p structPointer, f field) word64 {
    return word64{structPointer_field(p, f)}
}

// word64Val is like word32Val but for 64-bit values.
type word64Val struct {
    v reflect.Value
}

func word64Val_Set(p word64Val, o *Buffer, x uint64) {
    switch p.v.Type() {
    case int64Type:
        p.v.SetInt(int64(x))
        return
    case uint64Type:
        p.v.SetUint(x)
        return
    case float64Type:
        p.v.SetFloat(math.Float64frombits(x))
        return
    }
    panic("unreachable")
}

func word64Val_Get(p word64Val) uint64 {
    elem := p.v
    switch elem.Kind() {
    case reflect.Int64:
        return uint64(elem.Int())
    case reflect.Uint64:
        return elem.Uint()
    case reflect.Float64:
        return math.Float64bits(elem.Float())
    }
    panic("unreachable")
}

func structPointer_Word64Val(p structPointer, f field) word64Val {
    return word64Val{structPointer_field(p, f)}
}

type word64Slice struct {
    v reflect.Value
}

func (p word64Slice) Append(x uint64) {
    n, m := p.v.Len(), p.v.Cap()
    if n < m {
        p.v.SetLen(n + 1)
    } else {
        t := p.v.Type().Elem()
        p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
    }
    elem := p.v.Index(n)
    switch elem.Kind() {
    case reflect.Int64:
        elem.SetInt(int64(int64(x)))
    case reflect.Uint64:
        elem.SetUint(uint64(x))
    case reflect.Float64:
        elem.SetFloat(float64(math.Float64frombits(x)))
    }
}

func (p word64Slice) Len() int {
    return p.v.Len()
}

func (p word64Slice) Index(i int) uint64 {
    elem := p.v.Index(i)
    switch elem.Kind() {
    case reflect.Int64:
        return uint64(elem.Int())
    case reflect.Uint64:
        return uint64(elem.Uint())
    case reflect.Float64:
        return math.Float64bits(float64(elem.Float()))
    }
    panic("unreachable")
}

func structPointer_Word64Slice(p structPointer, f field) word64Slice {
    return word64Slice{structPointer_field(p, f)}
}

270 vendor/src/github.com/golang/protobuf/proto/pointer_unsafe.go vendored Normal file
@@ -0,0 +1,270 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2012 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// +build !appengine,!js

// This file contains the implementation of the proto field accesses using package unsafe.

package proto

import (
    "reflect"
    "unsafe"
)

// NOTE: These type_Foo functions would more idiomatically be methods,
// but Go does not allow methods on pointer types, and we must preserve
// some pointer type for the garbage collector. We use these
// funcs with clunky names as our poor approximation to methods.
//
// An alternative would be
//   type structPointer struct { p unsafe.Pointer }
// but that does not registerize as well.

// A structPointer is a pointer to a struct.
type structPointer unsafe.Pointer

// toStructPointer returns a structPointer equivalent to the given reflect value.
func toStructPointer(v reflect.Value) structPointer {
    return structPointer(unsafe.Pointer(v.Pointer()))
}

// IsNil reports whether p is nil.
func structPointer_IsNil(p structPointer) bool {
    return p == nil
}

// Interface returns the struct pointer, assumed to have element type t,
// as an interface value.
func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
    return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
}

// A field identifies a field in a struct, accessible from a structPointer.
// In this implementation, a field is identified by its byte offset from the start of the struct.
type field uintptr

// toField returns a field equivalent to the given reflect field.
func toField(f *reflect.StructField) field {
    return field(f.Offset)
}

// invalidField is an invalid field identifier.
const invalidField = ^field(0)

// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool {
    return f != ^field(0)
}

// Bytes returns the address of a []byte field in the struct.
func structPointer_Bytes(p structPointer, f field) *[]byte {
    return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// BytesSlice returns the address of a [][]byte field in the struct.
func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
    return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// Bool returns the address of a *bool field in the struct.
func structPointer_Bool(p structPointer, f field) **bool {
    return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// BoolVal returns the address of a bool field in the struct.
func structPointer_BoolVal(p structPointer, f field) *bool {
    return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// BoolSlice returns the address of a []bool field in the struct.
func structPointer_BoolSlice(p structPointer, f field) *[]bool {
    return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// String returns the address of a *string field in the struct.
func structPointer_String(p structPointer, f field) **string {
    return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// StringVal returns the address of a string field in the struct.
func structPointer_StringVal(p structPointer, f field) *string {
    return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// StringSlice returns the address of a []string field in the struct.
func structPointer_StringSlice(p structPointer, f field) *[]string {
    return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// ExtMap returns the address of an extension map field in the struct.
func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
    return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
    return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// NewAt returns the reflect.Value for a pointer to a field in the struct.
func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
    return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
}

// SetStructPointer writes a *struct field in the struct.
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
    *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
}

// GetStructPointer reads a *struct field in the struct.
func structPointer_GetStructPointer(p structPointer, f field) structPointer {
    return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// StructPointerSlice the address of a []*struct field in the struct.
func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
    return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
type structPointerSlice []structPointer

func (v *structPointerSlice) Len() int                  { return len(*v) }
func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
func (v *structPointerSlice) Append(p structPointer)    { *v = append(*v, p) }

// A word32 is the address of a "pointer to 32-bit value" field.
type word32 **uint32

// IsNil reports whether *v is nil.
func word32_IsNil(p word32) bool {
    return *p == nil
}

// Set sets *v to point at a newly allocated word set to x.
func word32_Set(p word32, o *Buffer, x uint32) {
    if len(o.uint32s) == 0 {
        o.uint32s = make([]uint32, uint32PoolSize)
    }
    o.uint32s[0] = x
    *p = &o.uint32s[0]
    o.uint32s = o.uint32s[1:]
}

// Get gets the value pointed at by *v.
func word32_Get(p word32) uint32 {
    return **p
}

// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
func structPointer_Word32(p structPointer, f field) word32 {
    return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}

// A word32Val is the address of a 32-bit value field.
type word32Val *uint32

// Set sets *p to x.
func word32Val_Set(p word32Val, x uint32) {
    *p = x
}

// Get gets the value pointed at by p.
func word32Val_Get(p word32Val) uint32 {
    return *p
}

// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
func structPointer_Word32Val(p structPointer, f field) word32Val {
    return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}

// A word32Slice is a slice of 32-bit values.
type word32Slice []uint32

func (v *word32Slice) Append(x uint32)    { *v = append(*v, x) }
func (v *word32Slice) Len() int           { return len(*v) }
func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }

// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
    return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// word64 is like word32 but for 64-bit values.
type word64 **uint64

func word64_Set(p word64, o *Buffer, x uint64) {
    if len(o.uint64s) == 0 {
        o.uint64s = make([]uint64, uint64PoolSize)
    }
    o.uint64s[0] = x
    *p = &o.uint64s[0]
    o.uint64s = o.uint64s[1:]
}

func word64_IsNil(p word64) bool {
    return *p == nil
}

func word64_Get(p word64) uint64 {
    return **p
}

func structPointer_Word64(p structPointer, f field) word64 {
    return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}

// word64Val is like word32Val but for 64-bit values.
type word64Val *uint64

func word64Val_Set(p word64Val, o *Buffer, x uint64) {
    *p = x
}

func word64Val_Get(p word64Val) uint64 {
    return *p
}

func structPointer_Word64Val(p structPointer, f field) word64Val {
    return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}

// word64Slice is like word32Slice but for 64-bit values.
type word64Slice []uint64

func (v *word64Slice) Append(x uint64)    { *v = append(*v, x) }
func (v *word64Slice) Len() int           { return len(*v) }
func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }

func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
    return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
872
vendor/src/github.com/golang/protobuf/proto/properties.go
vendored
Normal file
872
vendor/src/github.com/golang/protobuf/proto/properties.go
vendored
Normal file
|
@ -0,0 +1,872 @@
|
||||||
|
// Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
//
|
||||||
|
// Copyright 2010 The Go Authors. All rights reserved.
|
||||||
|
// https://github.com/golang/protobuf
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
package proto
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Routines for encoding data into the wire format for protocol buffers.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"reflect"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
const debug bool = false
|
||||||
|
|
||||||
|
// Constants that identify the encoding of a value on the wire.
|
||||||
|
const (
|
||||||
|
WireVarint = 0
|
||||||
|
WireFixed64 = 1
|
||||||
|
WireBytes = 2
|
||||||
|
WireStartGroup = 3
|
||||||
|
WireEndGroup = 4
|
||||||
|
WireFixed32 = 5
|
||||||
|
)
|
||||||
|
|
||||||
|
const startSize = 10 // initial slice/string sizes
|
||||||
|
|
||||||
|
// Encoders are defined in encode.go
|
||||||
|
// An encoder outputs the full representation of a field, including its
|
||||||
|
// tag and encoder type.
|
||||||
|
type encoder func(p *Buffer, prop *Properties, base structPointer) error
|
||||||
|
|
||||||
|
// A valueEncoder encodes a single integer in a particular encoding.
|
||||||
|
type valueEncoder func(o *Buffer, x uint64) error
|
||||||
|
|
||||||
|
// Sizers are defined in encode.go
|
||||||
|
// A sizer returns the encoded size of a field, including its tag and encoder
|
||||||
|
// type.
|
||||||
|
type sizer func(prop *Properties, base structPointer) int
|
||||||
|
|
||||||
|
// A valueSizer returns the encoded size of a single integer in a particular
|
||||||
|
// encoding.
|
||||||
|
type valueSizer func(x uint64) int
|
||||||
|
|
||||||
|
// Decoders are defined in decode.go
|
||||||
|
// A decoder creates a value from its wire representation.
|
||||||
|
// Unrecognized subelements are saved in unrec.
|
||||||
|
type decoder func(p *Buffer, prop *Properties, base structPointer) error
|
||||||
|
|
||||||
|
// A valueDecoder decodes a single integer in a particular encoding.
|
||||||
|
type valueDecoder func(o *Buffer) (x uint64, err error)
|
||||||
|
|
||||||
|
// A oneofMarshaler does the marshaling for all oneof fields in a message.
|
||||||
|
type oneofMarshaler func(Message, *Buffer) error
|
||||||
|
|
||||||
|
// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.
|
||||||
|
type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)
|
||||||
|
|
||||||
|
// A oneofSizer does the sizing for all oneof fields in a message.
|
||||||
|
type oneofSizer func(Message) int
|
||||||
|
|
||||||
|
// tagMap is an optimization over map[int]int for typical protocol buffer
|
||||||
|
// use-cases. Encoded protocol buffers are often in tag order with small tag
|
||||||
|
// numbers.
|
||||||
|
type tagMap struct {
|
||||||
|
fastTags []int
|
||||||
|
slowTags map[int]int
|
||||||
|
}
|
||||||
|
|
||||||
|
// tagMapFastLimit is the upper bound on the tag number that will be stored in
|
||||||
|
// the tagMap slice rather than its map.
|
||||||
|
const tagMapFastLimit = 1024
|
||||||
|
|
||||||
|
func (p *tagMap) get(t int) (int, bool) {
|
||||||
|
if t > 0 && t < tagMapFastLimit {
|
||||||
|
if t >= len(p.fastTags) {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
fi := p.fastTags[t]
|
||||||
|
return fi, fi >= 0
|
||||||
|
}
|
||||||
|
fi, ok := p.slowTags[t]
|
||||||
|
return fi, ok
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *tagMap) put(t int, fi int) {
|
||||||
|
if t > 0 && t < tagMapFastLimit {
|
||||||
|
for len(p.fastTags) < t+1 {
|
||||||
|
p.fastTags = append(p.fastTags, -1)
|
||||||
|
}
|
||||||
|
p.fastTags[t] = fi
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if p.slowTags == nil {
|
||||||
|
p.slowTags = make(map[int]int)
|
||||||
|
}
|
||||||
|
p.slowTags[t] = fi
|
||||||
|
}
|
||||||
|
|
||||||
|
// StructProperties represents properties for all the fields of a struct.
|
||||||
|
// decoderTags and decoderOrigNames should only be used by the decoder.
|
||||||
|
type StructProperties struct {
|
||||||
|
Prop []*Properties // properties for each field
|
||||||
|
reqCount int // required count
|
||||||
|
decoderTags tagMap // map from proto tag to struct field number
|
||||||
|
decoderOrigNames map[string]int // map from original name to struct field number
|
||||||
|
order []int // list of struct field numbers in tag order
|
||||||
|
unrecField field // field id of the XXX_unrecognized []byte field
|
||||||
|
extendable bool // is this an extendable proto
|
||||||
|
|
||||||
|
oneofMarshaler oneofMarshaler
|
||||||
|
oneofUnmarshaler oneofUnmarshaler
|
||||||
|
oneofSizer oneofSizer
|
||||||
|
stype reflect.Type
|
||||||
|
|
||||||
|
// OneofTypes contains information about the oneof fields in this message.
|
||||||
|
// It is keyed by the original name of a field.
|
||||||
|
OneofTypes map[string]*OneofProperties
|
||||||
|
}
|
||||||
|
|
||||||
|
// OneofProperties represents information about a specific field in a oneof.
|
||||||
|
type OneofProperties struct {
|
||||||
|
Type reflect.Type // pointer to generated struct type for this oneof field
|
||||||
|
Field int // struct field number of the containing oneof in the message
|
||||||
|
Prop *Properties
|
||||||
|
}
|
||||||
|
|
||||||
|
// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
|
||||||
|
// See encode.go, (*Buffer).enc_struct.
|
||||||
|
|
||||||
|
func (sp *StructProperties) Len() int { return len(sp.order) }
|
||||||
|
func (sp *StructProperties) Less(i, j int) bool {
|
||||||
|
return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
|
||||||
|
}
|
||||||
|
func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
|
||||||
|
|
||||||
|
// Properties represents the protocol-specific behavior of a single struct field.
|
||||||
|
type Properties struct {
|
||||||
|
Name string // name of the field, for error messages
|
||||||
|
OrigName string // original name before protocol compiler (always set)
|
||||||
|
JSONName string // name to use for JSON; determined by protoc
|
||||||
|
Wire string
|
||||||
|
WireType int
|
||||||
|
Tag int
|
||||||
|
Required bool
|
||||||
|
Optional bool
|
||||||
|
Repeated bool
|
||||||
|
Packed bool // relevant for repeated primitives only
|
||||||
|
Enum string // set for enum types only
|
||||||
|
proto3 bool // whether this is known to be a proto3 field; set for []byte only
|
||||||
|
oneof bool // whether this is a oneof field
|
||||||
|
|
||||||
|
Default string // default value
|
||||||
|
HasDefault bool // whether an explicit default was provided
|
||||||
|
def_uint64 uint64
|
||||||
|
|
||||||
|
enc encoder
|
||||||
|
valEnc valueEncoder // set for bool and numeric types only
|
||||||
|
field field
|
||||||
|
tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType)
|
||||||
|
tagbuf [8]byte
|
||||||
|
stype reflect.Type // set for struct types only
|
||||||
|
sprop *StructProperties // set for struct types only
|
||||||
|
isMarshaler bool
|
||||||
|
isUnmarshaler bool
|
||||||
|
|
||||||
|
mtype reflect.Type // set for map types only
|
||||||
|
mkeyprop *Properties // set for map types only
|
||||||
|
mvalprop *Properties // set for map types only
|
||||||
|
|
||||||
|
size sizer
|
||||||
|
valSize valueSizer // set for bool and numeric types only
|
||||||
|
|
||||||
|
dec decoder
|
||||||
|
valDec valueDecoder // set for bool and numeric types only
|
||||||
|
|
||||||
|
// If this is a packable field, this will be the decoder for the packed version of the field.
|
||||||
|
packedDec decoder
|
||||||
|
}
|
||||||
|
|
||||||
|
// String formats the properties in the protobuf struct field tag style.
|
||||||
|
func (p *Properties) String() string {
|
||||||
|
s := p.Wire
|
||||||
|
s = ","
|
||||||
|
s += strconv.Itoa(p.Tag)
|
||||||
|
if p.Required {
|
||||||
|
s += ",req"
|
||||||
|
}
|
||||||
|
if p.Optional {
|
||||||
|
s += ",opt"
|
||||||
|
}
|
||||||
|
if p.Repeated {
|
||||||
|
s += ",rep"
|
||||||
|
}
|
||||||
|
if p.Packed {
|
||||||
|
s += ",packed"
|
||||||
|
}
|
||||||
|
s += ",name=" + p.OrigName
|
||||||
|
if p.JSONName != p.OrigName {
|
||||||
|
s += ",json=" + p.JSONName
|
||||||
|
}
|
||||||
|
if p.proto3 {
|
||||||
|
s += ",proto3"
|
||||||
|
}
|
||||||
|
if p.oneof {
|
||||||
|
s += ",oneof"
|
||||||
|
}
|
||||||
|
if len(p.Enum) > 0 {
|
||||||
|
s += ",enum=" + p.Enum
|
||||||
|
}
|
||||||
|
if p.HasDefault {
|
||||||
|
s += ",def=" + p.Default
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse populates p by parsing a string in the protobuf struct field tag style.
|
||||||
|
func (p *Properties) Parse(s string) {
|
||||||
|
// "bytes,49,opt,name=foo,def=hello!"
|
||||||
|
fields := strings.Split(s, ",") // breaks def=, but handled below.
|
||||||
|
if len(fields) < 2 {
|
||||||
|
fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
p.Wire = fields[0]
|
||||||
|
switch p.Wire {
|
||||||
|
case "varint":
|
||||||
|
p.WireType = WireVarint
|
||||||
|
p.valEnc = (*Buffer).EncodeVarint
|
||||||
|
p.valDec = (*Buffer).DecodeVarint
|
||||||
|
p.valSize = sizeVarint
|
||||||
|
case "fixed32":
|
||||||
|
p.WireType = WireFixed32
|
||||||
|
p.valEnc = (*Buffer).EncodeFixed32
|
||||||
|
p.valDec = (*Buffer).DecodeFixed32
|
||||||
|
p.valSize = sizeFixed32
|
||||||
|
case "fixed64":
|
||||||
|
p.WireType = WireFixed64
|
||||||
|
p.valEnc = (*Buffer).EncodeFixed64
|
||||||
|
p.valDec = (*Buffer).DecodeFixed64
|
||||||
|
p.valSize = sizeFixed64
|
||||||
|
case "zigzag32":
|
||||||
|
p.WireType = WireVarint
|
||||||
|
p.valEnc = (*Buffer).EncodeZigzag32
|
||||||
|
p.valDec = (*Buffer).DecodeZigzag32
|
||||||
|
p.valSize = sizeZigzag32
|
||||||
|
case "zigzag64":
|
||||||
|
p.WireType = WireVarint
|
||||||
|
p.valEnc = (*Buffer).EncodeZigzag64
|
||||||
|
p.valDec = (*Buffer).DecodeZigzag64
|
||||||
|
p.valSize = sizeZigzag64
|
||||||
|
case "bytes", "group":
|
||||||
|
p.WireType = WireBytes
|
||||||
|
// no numeric converter for non-numeric types
|
||||||
|
default:
|
||||||
|
fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var err error
|
||||||
|
p.Tag, err = strconv.Atoi(fields[1])
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 2; i < len(fields); i++ {
|
||||||
|
f := fields[i]
|
||||||
|
switch {
|
||||||
|
case f == "req":
|
||||||
|
p.Required = true
|
||||||
|
case f == "opt":
|
||||||
|
p.Optional = true
|
||||||
|
case f == "rep":
|
||||||
|
p.Repeated = true
|
||||||
|
case f == "packed":
|
||||||
|
p.Packed = true
|
||||||
|
case strings.HasPrefix(f, "name="):
|
||||||
|
p.OrigName = f[5:]
|
||||||
|
case strings.HasPrefix(f, "json="):
|
||||||
|
p.JSONName = f[5:]
|
||||||
|
case strings.HasPrefix(f, "enum="):
|
||||||
|
p.Enum = f[5:]
|
||||||
|
case f == "proto3":
|
||||||
|
p.proto3 = true
|
||||||
|
case f == "oneof":
|
||||||
|
p.oneof = true
|
||||||
|
case strings.HasPrefix(f, "def="):
|
||||||
|
p.HasDefault = true
|
||||||
|
p.Default = f[4:] // rest of string
|
||||||
|
if i+1 < len(fields) {
|
||||||
|
// Commas aren't escaped, and def is always last.
|
||||||
|
p.Default += "," + strings.Join(fields[i+1:], ",")
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func logNoSliceEnc(t1, t2 reflect.Type) {
|
||||||
|
fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2)
|
||||||
|
}
|
||||||
|
|
||||||
|
var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
|
||||||
|
|
||||||
|
// Initialize the fields for encoding and decoding.
|
||||||
|
func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
|
||||||
|
p.enc = nil
|
||||||
|
p.dec = nil
|
||||||
|
p.size = nil
|
||||||
|
|
||||||
|
switch t1 := typ; t1.Kind() {
|
||||||
|
default:
|
||||||
|
fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
|
||||||
|
|
||||||
|
// proto3 scalar types
|
||||||
|
|
||||||
|
case reflect.Bool:
|
||||||
|
p.enc = (*Buffer).enc_proto3_bool
|
||||||
|
p.dec = (*Buffer).dec_proto3_bool
|
||||||
|
p.size = size_proto3_bool
|
||||||
|
case reflect.Int32:
|
||||||
|
p.enc = (*Buffer).enc_proto3_int32
|
||||||
|
p.dec = (*Buffer).dec_proto3_int32
|
||||||
|
p.size = size_proto3_int32
|
||||||
|
case reflect.Uint32:
|
||||||
|
p.enc = (*Buffer).enc_proto3_uint32
|
||||||
|
p.dec = (*Buffer).dec_proto3_int32 // can reuse
|
||||||
|
p.size = size_proto3_uint32
|
||||||
|
case reflect.Int64, reflect.Uint64:
|
||||||
|
p.enc = (*Buffer).enc_proto3_int64
|
||||||
|
p.dec = (*Buffer).dec_proto3_int64
|
||||||
|
p.size = size_proto3_int64
|
||||||
|
case reflect.Float32:
|
||||||
|
p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
|
||||||
|
p.dec = (*Buffer).dec_proto3_int32
|
||||||
|
p.size = size_proto3_uint32
|
||||||
|
case reflect.Float64:
|
||||||
|
p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
|
||||||
|
p.dec = (*Buffer).dec_proto3_int64
|
||||||
|
p.size = size_proto3_int64
|
||||||
|
case reflect.String:
|
||||||
|
p.enc = (*Buffer).enc_proto3_string
|
||||||
|
p.dec = (*Buffer).dec_proto3_string
|
||||||
|
p.size = size_proto3_string
|
||||||
|
|
||||||
|
case reflect.Ptr:
|
||||||
|
switch t2 := t1.Elem(); t2.Kind() {
|
||||||
|
default:
|
||||||
|
fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
|
||||||
|
break
|
||||||
|
case reflect.Bool:
|
||||||
|
p.enc = (*Buffer).enc_bool
|
||||||
|
p.dec = (*Buffer).dec_bool
|
||||||
|
p.size = size_bool
|
||||||
|
case reflect.Int32:
|
||||||
|
p.enc = (*Buffer).enc_int32
|
||||||
|
p.dec = (*Buffer).dec_int32
|
||||||
|
p.size = size_int32
|
||||||
|
case reflect.Uint32:
|
||||||
|
p.enc = (*Buffer).enc_uint32
|
||||||
|
p.dec = (*Buffer).dec_int32 // can reuse
|
||||||
|
p.size = size_uint32
|
||||||
|
case reflect.Int64, reflect.Uint64:
|
||||||
|
p.enc = (*Buffer).enc_int64
|
||||||
|
p.dec = (*Buffer).dec_int64
|
||||||
|
p.size = size_int64
|
||||||
|
case reflect.Float32:
|
||||||
|
p.enc = (*Buffer).enc_uint32 // can just treat them as bits
|
||||||
|
p.dec = (*Buffer).dec_int32
|
||||||
|
p.size = size_uint32
|
||||||
|
case reflect.Float64:
|
||||||
|
p.enc = (*Buffer).enc_int64 // can just treat them as bits
|
||||||
|
p.dec = (*Buffer).dec_int64
|
||||||
|
p.size = size_int64
|
||||||
|
case reflect.String:
|
||||||
|
p.enc = (*Buffer).enc_string
|
||||||
|
p.dec = (*Buffer).dec_string
|
||||||
|
p.size = size_string
|
||||||
|
case reflect.Struct:
|
||||||
|
p.stype = t1.Elem()
|
||||||
|
p.isMarshaler = isMarshaler(t1)
|
||||||
|
p.isUnmarshaler = isUnmarshaler(t1)
|
||||||
|
if p.Wire == "bytes" {
|
||||||
|
p.enc = (*Buffer).enc_struct_message
|
||||||
|
p.dec = (*Buffer).dec_struct_message
|
||||||
|
p.size = size_struct_message
|
||||||
|
} else {
|
||||||
|
p.enc = (*Buffer).enc_struct_group
|
||||||
|
p.dec = (*Buffer).dec_struct_group
|
||||||
|
p.size = size_struct_group
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
case reflect.Slice:
|
||||||
|
switch t2 := t1.Elem(); t2.Kind() {
|
||||||
|
default:
|
||||||
|
logNoSliceEnc(t1, t2)
|
||||||
|
break
|
||||||
|
case reflect.Bool:
|
||||||
|
if p.Packed {
|
||||||
|
p.enc = (*Buffer).enc_slice_packed_bool
|
||||||
|
p.size = size_slice_packed_bool
|
||||||
|
} else {
|
||||||
|
p.enc = (*Buffer).enc_slice_bool
|
||||||
|
p.size = size_slice_bool
|
||||||
|
}
|
||||||
|
p.dec = (*Buffer).dec_slice_bool
|
||||||
|
p.packedDec = (*Buffer).dec_slice_packed_bool
|
||||||
|
case reflect.Int32:
|
||||||
|
if p.Packed {
|
||||||
|
p.enc = (*Buffer).enc_slice_packed_int32
|
||||||
|
p.size = size_slice_packed_int32
|
||||||
|
} else {
|
||||||
|
p.enc = (*Buffer).enc_slice_int32
|
||||||
|
p.size = size_slice_int32
|
||||||
|
}
|
||||||
|
p.dec = (*Buffer).dec_slice_int32
|
||||||
|
p.packedDec = (*Buffer).dec_slice_packed_int32
|
||||||
|
case reflect.Uint32:
|
||||||
|
if p.Packed {
|
||||||
|
p.enc = (*Buffer).enc_slice_packed_uint32
|
||||||
|
p.size = size_slice_packed_uint32
|
||||||
|
} else {
|
||||||
|
p.enc = (*Buffer).enc_slice_uint32
|
||||||
|
p.size = size_slice_uint32
|
||||||
|
}
|
||||||
|
p.dec = (*Buffer).dec_slice_int32
|
||||||
|
p.packedDec = (*Buffer).dec_slice_packed_int32
|
||||||
|
case reflect.Int64, reflect.Uint64:
|
||||||
|
if p.Packed {
|
||||||
|
p.enc = (*Buffer).enc_slice_packed_int64
|
||||||
|
p.size = size_slice_packed_int64
|
||||||
|
} else {
|
||||||
|
p.enc = (*Buffer).enc_slice_int64
|
||||||
|
p.size = size_slice_int64
|
||||||
|
}
|
||||||
|
p.dec = (*Buffer).dec_slice_int64
|
||||||
|
p.packedDec = (*Buffer).dec_slice_packed_int64
|
||||||
|
case reflect.Uint8:
|
||||||
|
p.dec = (*Buffer).dec_slice_byte
|
||||||
|
if p.proto3 {
|
||||||
|
p.enc = (*Buffer).enc_proto3_slice_byte
|
||||||
|
p.size = size_proto3_slice_byte
|
||||||
|
} else {
|
||||||
|
p.enc = (*Buffer).enc_slice_byte
|
||||||
|
p.size = size_slice_byte
|
||||||
|
}
|
||||||
|
case reflect.Float32, reflect.Float64:
|
||||||
|
switch t2.Bits() {
|
||||||
|
case 32:
|
||||||
|
// can just treat them as bits
|
||||||
|
if p.Packed {
|
||||||
|
p.enc = (*Buffer).enc_slice_packed_uint32
|
||||||
|
p.size = size_slice_packed_uint32
|
||||||
|
} else {
|
||||||
|
p.enc = (*Buffer).enc_slice_uint32
|
||||||
|
p.size = size_slice_uint32
|
||||||
|
}
|
||||||
|
p.dec = (*Buffer).dec_slice_int32
|
||||||
|
p.packedDec = (*Buffer).dec_slice_packed_int32
|
||||||
|
case 64:
|
||||||
|
// can just treat them as bits
|
||||||
|
if p.Packed {
|
||||||
|
p.enc = (*Buffer).enc_slice_packed_int64
|
||||||
|
p.size = size_slice_packed_int64
|
||||||
|
} else {
|
||||||
|
p.enc = (*Buffer).enc_slice_int64
|
||||||
|
p.size = size_slice_int64
|
||||||
|
}
|
||||||
|
p.dec = (*Buffer).dec_slice_int64
|
||||||
|
p.packedDec = (*Buffer).dec_slice_packed_int64
|
||||||
|
default:
|
||||||
|
logNoSliceEnc(t1, t2)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
case reflect.String:
|
||||||
|
p.enc = (*Buffer).enc_slice_string
|
||||||
|
p.dec = (*Buffer).dec_slice_string
|
||||||
|
p.size = size_slice_string
|
||||||
|
case reflect.Ptr:
|
||||||
|
switch t3 := t2.Elem(); t3.Kind() {
|
||||||
|
default:
|
||||||
|
fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3)
|
||||||
|
break
|
||||||
|
case reflect.Struct:
|
||||||
|
p.stype = t2.Elem()
|
||||||
|
p.isMarshaler = isMarshaler(t2)
|
||||||
|
p.isUnmarshaler = isUnmarshaler(t2)
|
||||||
|
if p.Wire == "bytes" {
|
||||||
|
p.enc = (*Buffer).enc_slice_struct_message
|
||||||
|
p.dec = (*Buffer).dec_slice_struct_message
|
||||||
|
p.size = size_slice_struct_message
|
||||||
|
} else {
|
||||||
|
p.enc = (*Buffer).enc_slice_struct_group
|
||||||
|
p.dec = (*Buffer).dec_slice_struct_group
|
||||||
|
p.size = size_slice_struct_group
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case reflect.Slice:
|
||||||
|
switch t2.Elem().Kind() {
|
||||||
|
default:
|
||||||
|
fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem())
|
||||||
|
break
|
||||||
|
case reflect.Uint8:
|
||||||
|
p.enc = (*Buffer).enc_slice_slice_byte
|
||||||
|
p.dec = (*Buffer).dec_slice_slice_byte
|
||||||
|
p.size = size_slice_slice_byte
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
case reflect.Map:
|
||||||
|
p.enc = (*Buffer).enc_new_map
|
||||||
|
p.dec = (*Buffer).dec_new_map
|
||||||
|
p.size = size_new_map
|
||||||
|
|
||||||
|
p.mtype = t1
|
||||||
|
p.mkeyprop = &Properties{}
|
||||||
|
p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
|
||||||
|
p.mvalprop = &Properties{}
|
||||||
|
vtype := p.mtype.Elem()
|
||||||
|
if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
|
||||||
|
// The value type is not a message (*T) or bytes ([]byte),
|
||||||
|
// so we need encoders for the pointer to this type.
|
||||||
|
vtype = reflect.PtrTo(vtype)
|
||||||
|
}
|
||||||
|
p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
|
||||||
|
}
|
||||||
|
|
||||||
|
// precalculate tag code
|
||||||
|
wire := p.WireType
|
||||||
|
if p.Packed {
|
||||||
|
wire = WireBytes
|
||||||
|
}
|
||||||
|
x := uint32(p.Tag)<<3 | uint32(wire)
|
||||||
|
i := 0
|
||||||
|
for i = 0; x > 127; i++ {
|
||||||
|
p.tagbuf[i] = 0x80 | uint8(x&0x7F)
|
||||||
|
x >>= 7
|
||||||
|
}
|
||||||
|
p.tagbuf[i] = uint8(x)
|
||||||
|
p.tagcode = p.tagbuf[0 : i+1]
|
||||||
|
|
||||||
|
if p.stype != nil {
|
||||||
|
if lockGetProp {
|
||||||
|
p.sprop = GetProperties(p.stype)
|
||||||
|
} else {
|
||||||
|
p.sprop = getPropertiesLocked(p.stype)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
|
||||||
|
unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
|
||||||
|
)
|
||||||
|
|
||||||
|
// isMarshaler reports whether type t implements Marshaler.
|
||||||
|
func isMarshaler(t reflect.Type) bool {
|
||||||
|
// We're checking for (likely) pointer-receiver methods
|
||||||
|
// so if t is not a pointer, something is very wrong.
|
||||||
|
// The calls above only invoke isMarshaler on pointer types.
|
||||||
|
if t.Kind() != reflect.Ptr {
|
||||||
|
panic("proto: misuse of isMarshaler")
|
||||||
|
}
|
||||||
|
return t.Implements(marshalerType)
|
||||||
|
}
|
||||||
|
|
||||||
|
// isUnmarshaler reports whether type t implements Unmarshaler.
|
||||||
|
func isUnmarshaler(t reflect.Type) bool {
|
||||||
|
// We're checking for (likely) pointer-receiver methods
|
||||||
|
// so if t is not a pointer, something is very wrong.
|
||||||
|
// The calls above only invoke isUnmarshaler on pointer types.
|
||||||
|
if t.Kind() != reflect.Ptr {
|
||||||
|
panic("proto: misuse of isUnmarshaler")
|
||||||
|
}
|
||||||
|
return t.Implements(unmarshalerType)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Init populates the properties from a protocol buffer struct tag.
|
||||||
|
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
|
||||||
|
p.init(typ, name, tag, f, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
|
||||||
|
// "bytes,49,opt,def=hello!"
|
||||||
|
p.Name = name
|
||||||
|
p.OrigName = name
|
||||||
|
if f != nil {
|
||||||
|
p.field = toField(f)
|
||||||
|
}
|
||||||
|
if tag == "" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
p.Parse(tag)
|
||||||
|
p.setEncAndDec(typ, f, lockGetProp)
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
propertiesMu sync.RWMutex
|
||||||
|
propertiesMap = make(map[reflect.Type]*StructProperties)
|
||||||
|
)
|
||||||
|
|
||||||
|
// GetProperties returns the list of properties for the type represented by t.
|
||||||
|
// t must represent a generated struct type of a protocol message.
|
||||||
|
func GetProperties(t reflect.Type) *StructProperties {
|
||||||
|
if t.Kind() != reflect.Struct {
|
||||||
|
panic("proto: type must have kind struct")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Most calls to GetProperties in a long-running program will be
|
||||||
|
// retrieving details for types we have seen before.
|
||||||
|
propertiesMu.RLock()
|
||||||
|
sprop, ok := propertiesMap[t]
|
||||||
|
propertiesMu.RUnlock()
|
||||||
|
if ok {
|
||||||
|
if collectStats {
|
||||||
|
stats.Chit++
|
||||||
|
}
|
||||||
|
return sprop
|
||||||
|
}
|
||||||
|
|
||||||
|
propertiesMu.Lock()
|
||||||
|
sprop = getPropertiesLocked(t)
|
||||||
|
propertiesMu.Unlock()
|
||||||
|
return sprop
|
||||||
|
}
|
||||||
|
|
||||||
|
// getPropertiesLocked requires that propertiesMu is held.
|
||||||
|
func getPropertiesLocked(t reflect.Type) *StructProperties {
|
||||||
|
if prop, ok := propertiesMap[t]; ok {
|
||||||
|
if collectStats {
|
||||||
|
stats.Chit++
|
||||||
|
}
|
||||||
|
return prop
|
||||||
|
}
|
||||||
|
if collectStats {
|
||||||
|
stats.Cmiss++
|
||||||
|
}
|
||||||
|
|
||||||
|
prop := new(StructProperties)
|
||||||
|
// in case of recursive protos, fill this in now.
|
||||||
|
propertiesMap[t] = prop
|
||||||
|
|
||||||
|
// build properties
|
||||||
|
prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) ||
|
||||||
|
reflect.PtrTo(t).Implements(extendableProtoV1Type)
|
||||||
|
prop.unrecField = invalidField
|
||||||
|
prop.Prop = make([]*Properties, t.NumField())
|
||||||
|
prop.order = make([]int, t.NumField())
|
||||||
|
|
||||||
|
for i := 0; i < t.NumField(); i++ {
|
||||||
|
f := t.Field(i)
|
||||||
|
p := new(Properties)
|
||||||
|
name := f.Name
|
||||||
|
p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
|
||||||
|
|
||||||
|
if f.Name == "XXX_InternalExtensions" { // special case
|
||||||
|
p.enc = (*Buffer).enc_exts
|
||||||
|
p.dec = nil // not needed
|
||||||
|
p.size = size_exts
|
||||||
|
} else if f.Name == "XXX_extensions" { // special case
|
||||||
|
p.enc = (*Buffer).enc_map
|
||||||
|
p.dec = nil // not needed
|
||||||
|
p.size = size_map
|
||||||
|
} else if f.Name == "XXX_unrecognized" { // special case
|
||||||
|
prop.unrecField = toField(&f)
|
||||||
|
}
|
||||||
|
oneof := f.Tag.Get("protobuf_oneof") // special case
|
||||||
|
if oneof != "" {
|
||||||
|
// Oneof fields don't use the traditional protobuf tag.
|
||||||
|
p.OrigName = oneof
|
||||||
|
}
|
||||||
|
prop.Prop[i] = p
|
||||||
|
prop.order[i] = i
|
||||||
|
if debug {
|
||||||
|
print(i, " ", f.Name, " ", t.String(), " ")
|
||||||
|
if p.Tag > 0 {
|
||||||
|
print(p.String())
|
||||||
|
}
|
||||||
|
print("\n")
|
||||||
|
}
|
||||||
|
if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" {
|
||||||
|
fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Re-order prop.order.
|
||||||
|
sort.Sort(prop)
|
||||||
|
|
||||||
|
type oneofMessage interface {
|
||||||
|
XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
|
||||||
|
}
|
||||||
|
if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
|
||||||
|
var oots []interface{}
|
||||||
|
prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs()
|
||||||
|
prop.stype = t
|
||||||
|
|
||||||
|
// Interpret oneof metadata.
|
||||||
|
prop.OneofTypes = make(map[string]*OneofProperties)
|
||||||
|
for _, oot := range oots {
|
||||||
|
oop := &OneofProperties{
|
||||||
|
Type: reflect.ValueOf(oot).Type(), // *T
|
||||||
|
Prop: new(Properties),
|
||||||
|
}
|
||||||
|
sft := oop.Type.Elem().Field(0)
|
||||||
|
oop.Prop.Name = sft.Name
|
||||||
|
oop.Prop.Parse(sft.Tag.Get("protobuf"))
|
||||||
|
// There will be exactly one interface field that
|
||||||
|
// this new value is assignable to.
|
||||||
|
for i := 0; i < t.NumField(); i++ {
|
||||||
|
f := t.Field(i)
|
||||||
|
if f.Type.Kind() != reflect.Interface {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if !oop.Type.AssignableTo(f.Type) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
oop.Field = i
|
||||||
|
break
|
||||||
|
}
|
||||||
|
prop.OneofTypes[oop.Prop.OrigName] = oop
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// build required counts
|
||||||
|
// build tags
|
||||||
|
reqCount := 0
|
||||||
|
prop.decoderOrigNames = make(map[string]int)
|
||||||
|
for i, p := range prop.Prop {
|
||||||
|
if strings.HasPrefix(p.Name, "XXX_") {
|
||||||
|
// Internal fields should not appear in tags/origNames maps.
|
||||||
|
// They are handled specially when encoding and decoding.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if p.Required {
|
||||||
|
reqCount++
|
||||||
|
}
|
||||||
|
prop.decoderTags.put(p.Tag, i)
|
||||||
|
prop.decoderOrigNames[p.OrigName] = i
|
||||||
|
}
|
||||||
|
prop.reqCount = reqCount
|
||||||
|
|
||||||
|
return prop
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return the Properties object for the x[0]'th field of the structure.
|
||||||
|
func propByIndex(t reflect.Type, x []int) *Properties {
|
||||||
|
if len(x) != 1 {
|
||||||
|
fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
prop := GetProperties(t)
|
||||||
|
return prop.Prop[x[0]]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get the address and type of a pointer to a struct from an interface.
|
||||||
|
func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
|
||||||
|
if pb == nil {
|
||||||
|
err = ErrNil
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// get the reflect type of the pointer to the struct.
|
||||||
|
t = reflect.TypeOf(pb)
|
||||||
|
// get the address of the struct.
|
||||||
|
value := reflect.ValueOf(pb)
|
||||||
|
b = toStructPointer(value)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// A global registry of enum types.
|
||||||
|
// The generated code will register the generated maps by calling RegisterEnum.
|
||||||
|
|
||||||
|
var enumValueMaps = make(map[string]map[string]int32)
|
||||||
|
|
||||||
|
// RegisterEnum is called from the generated code to install the enum descriptor
|
||||||
|
// maps into the global table to aid parsing text format protocol buffers.
|
||||||
|
func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
|
||||||
|
if _, ok := enumValueMaps[typeName]; ok {
|
||||||
|
panic("proto: duplicate enum registered: " + typeName)
|
||||||
|
}
|
||||||
|
enumValueMaps[typeName] = valueMap
|
||||||
|
}
|
||||||
|
|
||||||
|
// EnumValueMap returns the mapping from names to integers of the
|
||||||
|
// enum type enumType, or a nil if not found.
|
||||||
|
func EnumValueMap(enumType string) map[string]int32 {
|
||||||
|
return enumValueMaps[enumType]
|
||||||
|
}
|
||||||
|
|
||||||
|
// A registry of all linked message types.
|
||||||
|
// The string is a fully-qualified proto name ("pkg.Message").
|
||||||
|
var (
|
||||||
|
protoTypes = make(map[string]reflect.Type)
|
||||||
|
revProtoTypes = make(map[reflect.Type]string)
|
||||||
|
)
|
||||||
|
|
||||||
|
// RegisterType is called from generated code and maps from the fully qualified
|
||||||
|
// proto name to the type (pointer to struct) of the protocol buffer.
|
||||||
|
func RegisterType(x Message, name string) {
|
||||||
|
if _, ok := protoTypes[name]; ok {
|
||||||
|
// TODO: Some day, make this a panic.
|
||||||
|
log.Printf("proto: duplicate proto type registered: %s", name)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
t := reflect.TypeOf(x)
|
||||||
|
protoTypes[name] = t
|
||||||
|
revProtoTypes[t] = name
|
||||||
|
}
|
||||||
|
|
||||||
|
// MessageName returns the fully-qualified proto name for the given message type.
|
||||||
|
func MessageName(x Message) string {
|
||||||
|
type xname interface {
|
||||||
|
XXX_MessageName() string
|
||||||
|
}
|
||||||
|
if m, ok := x.(xname); ok {
|
||||||
|
return m.XXX_MessageName()
|
||||||
|
}
|
||||||
|
return revProtoTypes[reflect.TypeOf(x)]
|
||||||
|
}
|
||||||
|
|
||||||
|
// MessageType returns the message type (pointer to struct) for a named message.
|
||||||
|
func MessageType(name string) reflect.Type { return protoTypes[name] }
|
||||||
|
|
||||||
|
// A registry of all linked proto files.
|
||||||
|
var (
|
||||||
|
protoFiles = make(map[string][]byte) // file name => fileDescriptor
|
||||||
|
)
|
||||||
|
|
||||||
|
// RegisterFile is called from generated code and maps from the
|
||||||
|
// full file name of a .proto file to its compressed FileDescriptorProto.
|
||||||
|
func RegisterFile(filename string, fileDescriptor []byte) {
|
||||||
|
protoFiles[filename] = fileDescriptor
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
|
||||||
|
func FileDescriptor(filename string) []byte { return protoFiles[filename] }
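
A minimal usage sketch of the registration and property machinery defined above. The Example type, its tags, and the name "example.Example" are hand-written stand-ins for protoc-gen-go output, not part of the vendored file; the tag strings are exactly what Properties.Parse above consumes.

package main

import (
	"fmt"
	"reflect"

	"github.com/golang/protobuf/proto"
)

// Example stands in for a protoc-generated message type.
type Example struct {
	Name             string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
	Count            int32  `protobuf:"varint,2,opt,name=count" json:"count,omitempty"`
	XXX_unrecognized []byte `json:"-"`
}

func (m *Example) Reset()         { *m = Example{} }
func (m *Example) String() string { return proto.CompactTextString(m) }
func (*Example) ProtoMessage()    {}

func main() {
	// Generated code registers each message under its fully-qualified proto name.
	proto.RegisterType((*Example)(nil), "example.Example")
	fmt.Println(proto.MessageName(&Example{})) // example.Example

	// GetProperties parses the struct tags into a cached StructProperties table.
	sprops := proto.GetProperties(reflect.TypeOf(Example{}))
	for _, p := range sprops.Prop {
		fmt.Printf("field %s: tag=%d wire=%q\n", p.OrigName, p.Tag, p.Wire)
	}
}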
|
347  vendor/src/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go  (vendored, new file)
@@ -0,0 +1,347 @@
|
||||||
|
// Code generated by protoc-gen-go.
|
||||||
|
// source: proto3_proto/proto3.proto
|
||||||
|
// DO NOT EDIT!
|
||||||
|
|
||||||
|
/*
|
||||||
|
Package proto3_proto is a generated protocol buffer package.
|
||||||
|
|
||||||
|
It is generated from these files:
|
||||||
|
proto3_proto/proto3.proto
|
||||||
|
|
||||||
|
It has these top-level messages:
|
||||||
|
Message
|
||||||
|
Nested
|
||||||
|
MessageWithMap
|
||||||
|
IntMap
|
||||||
|
IntMaps
|
||||||
|
*/
|
||||||
|
package proto3_proto
|
||||||
|
|
||||||
|
import proto "github.com/golang/protobuf/proto"
|
||||||
|
import fmt "fmt"
|
||||||
|
import math "math"
|
||||||
|
import google_protobuf "github.com/golang/protobuf/ptypes/any"
|
||||||
|
import testdata "github.com/golang/protobuf/proto/testdata"
|
||||||
|
|
||||||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
var _ = proto.Marshal
|
||||||
|
var _ = fmt.Errorf
|
||||||
|
var _ = math.Inf
|
||||||
|
|
||||||
|
// This is a compile-time assertion to ensure that this generated file
|
||||||
|
// is compatible with the proto package it is being compiled against.
|
||||||
|
// A compilation error at this line likely means your copy of the
|
||||||
|
// proto package needs to be updated.
|
||||||
|
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||||
|
|
||||||
|
type Message_Humour int32
|
||||||
|
|
||||||
|
const (
|
||||||
|
Message_UNKNOWN Message_Humour = 0
|
||||||
|
Message_PUNS Message_Humour = 1
|
||||||
|
Message_SLAPSTICK Message_Humour = 2
|
||||||
|
Message_BILL_BAILEY Message_Humour = 3
|
||||||
|
)
|
||||||
|
|
||||||
|
var Message_Humour_name = map[int32]string{
|
||||||
|
0: "UNKNOWN",
|
||||||
|
1: "PUNS",
|
||||||
|
2: "SLAPSTICK",
|
||||||
|
3: "BILL_BAILEY",
|
||||||
|
}
|
||||||
|
var Message_Humour_value = map[string]int32{
|
||||||
|
"UNKNOWN": 0,
|
||||||
|
"PUNS": 1,
|
||||||
|
"SLAPSTICK": 2,
|
||||||
|
"BILL_BAILEY": 3,
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x Message_Humour) String() string {
|
||||||
|
return proto.EnumName(Message_Humour_name, int32(x))
|
||||||
|
}
|
||||||
|
func (Message_Humour) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} }
|
||||||
|
|
||||||
|
type Message struct {
|
||||||
|
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
|
||||||
|
Hilarity Message_Humour `protobuf:"varint,2,opt,name=hilarity,enum=proto3_proto.Message_Humour" json:"hilarity,omitempty"`
|
||||||
|
HeightInCm uint32 `protobuf:"varint,3,opt,name=height_in_cm,json=heightInCm" json:"height_in_cm,omitempty"`
|
||||||
|
Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"`
|
||||||
|
ResultCount int64 `protobuf:"varint,7,opt,name=result_count,json=resultCount" json:"result_count,omitempty"`
|
||||||
|
TrueScotsman bool `protobuf:"varint,8,opt,name=true_scotsman,json=trueScotsman" json:"true_scotsman,omitempty"`
|
||||||
|
Score float32 `protobuf:"fixed32,9,opt,name=score" json:"score,omitempty"`
|
||||||
|
Key []uint64 `protobuf:"varint,5,rep,packed,name=key" json:"key,omitempty"`
|
||||||
|
ShortKey []int32 `protobuf:"varint,19,rep,packed,name=short_key,json=shortKey" json:"short_key,omitempty"`
|
||||||
|
Nested *Nested `protobuf:"bytes,6,opt,name=nested" json:"nested,omitempty"`
|
||||||
|
RFunny []Message_Humour `protobuf:"varint,16,rep,packed,name=r_funny,json=rFunny,enum=proto3_proto.Message_Humour" json:"r_funny,omitempty"`
|
||||||
|
Terrain map[string]*Nested `protobuf:"bytes,10,rep,name=terrain" json:"terrain,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
|
||||||
|
Proto2Field *testdata.SubDefaults `protobuf:"bytes,11,opt,name=proto2_field,json=proto2Field" json:"proto2_field,omitempty"`
|
||||||
|
Proto2Value map[string]*testdata.SubDefaults `protobuf:"bytes,13,rep,name=proto2_value,json=proto2Value" json:"proto2_value,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
|
||||||
|
Anything *google_protobuf.Any `protobuf:"bytes,14,opt,name=anything" json:"anything,omitempty"`
|
||||||
|
ManyThings []*google_protobuf.Any `protobuf:"bytes,15,rep,name=many_things,json=manyThings" json:"many_things,omitempty"`
|
||||||
|
Submessage *Message `protobuf:"bytes,17,opt,name=submessage" json:"submessage,omitempty"`
|
||||||
|
Children []*Message `protobuf:"bytes,18,rep,name=children" json:"children,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Message) Reset() { *m = Message{} }
|
||||||
|
func (m *Message) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*Message) ProtoMessage() {}
|
||||||
|
func (*Message) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
|
||||||
|
|
||||||
|
func (m *Message) GetName() string {
|
||||||
|
if m != nil {
|
||||||
|
return m.Name
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Message) GetHilarity() Message_Humour {
|
||||||
|
if m != nil {
|
||||||
|
return m.Hilarity
|
||||||
|
}
|
||||||
|
return Message_UNKNOWN
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Message) GetHeightInCm() uint32 {
|
||||||
|
if m != nil {
|
||||||
|
return m.HeightInCm
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Message) GetData() []byte {
|
||||||
|
if m != nil {
|
||||||
|
return m.Data
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Message) GetResultCount() int64 {
|
||||||
|
if m != nil {
|
||||||
|
return m.ResultCount
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Message) GetTrueScotsman() bool {
|
||||||
|
if m != nil {
|
||||||
|
return m.TrueScotsman
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Message) GetScore() float32 {
|
||||||
|
if m != nil {
|
||||||
|
return m.Score
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Message) GetKey() []uint64 {
|
||||||
|
if m != nil {
|
||||||
|
return m.Key
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Message) GetShortKey() []int32 {
|
||||||
|
if m != nil {
|
||||||
|
return m.ShortKey
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Message) GetNested() *Nested {
|
||||||
|
if m != nil {
|
||||||
|
return m.Nested
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Message) GetRFunny() []Message_Humour {
|
||||||
|
if m != nil {
|
||||||
|
return m.RFunny
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Message) GetTerrain() map[string]*Nested {
|
||||||
|
if m != nil {
|
||||||
|
return m.Terrain
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Message) GetProto2Field() *testdata.SubDefaults {
|
||||||
|
if m != nil {
|
||||||
|
return m.Proto2Field
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Message) GetProto2Value() map[string]*testdata.SubDefaults {
|
||||||
|
if m != nil {
|
||||||
|
return m.Proto2Value
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Message) GetAnything() *google_protobuf.Any {
|
||||||
|
if m != nil {
|
||||||
|
return m.Anything
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Message) GetManyThings() []*google_protobuf.Any {
|
||||||
|
if m != nil {
|
||||||
|
return m.ManyThings
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Message) GetSubmessage() *Message {
|
||||||
|
if m != nil {
|
||||||
|
return m.Submessage
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Message) GetChildren() []*Message {
|
||||||
|
if m != nil {
|
||||||
|
return m.Children
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type Nested struct {
|
||||||
|
Bunny string `protobuf:"bytes,1,opt,name=bunny" json:"bunny,omitempty"`
|
||||||
|
Cute bool `protobuf:"varint,2,opt,name=cute" json:"cute,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Nested) Reset() { *m = Nested{} }
|
||||||
|
func (m *Nested) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*Nested) ProtoMessage() {}
|
||||||
|
func (*Nested) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
|
||||||
|
|
||||||
|
func (m *Nested) GetBunny() string {
|
||||||
|
if m != nil {
|
||||||
|
return m.Bunny
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Nested) GetCute() bool {
|
||||||
|
if m != nil {
|
||||||
|
return m.Cute
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
type MessageWithMap struct {
|
||||||
|
ByteMapping map[bool][]byte `protobuf:"bytes,1,rep,name=byte_mapping,json=byteMapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MessageWithMap) Reset() { *m = MessageWithMap{} }
|
||||||
|
func (m *MessageWithMap) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*MessageWithMap) ProtoMessage() {}
|
||||||
|
func (*MessageWithMap) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
|
||||||
|
|
||||||
|
func (m *MessageWithMap) GetByteMapping() map[bool][]byte {
|
||||||
|
if m != nil {
|
||||||
|
return m.ByteMapping
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type IntMap struct {
|
||||||
|
Rtt map[int32]int32 `protobuf:"bytes,1,rep,name=rtt" json:"rtt,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *IntMap) Reset() { *m = IntMap{} }
|
||||||
|
func (m *IntMap) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*IntMap) ProtoMessage() {}
|
||||||
|
func (*IntMap) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
|
||||||
|
|
||||||
|
func (m *IntMap) GetRtt() map[int32]int32 {
|
||||||
|
if m != nil {
|
||||||
|
return m.Rtt
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type IntMaps struct {
|
||||||
|
Maps []*IntMap `protobuf:"bytes,1,rep,name=maps" json:"maps,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *IntMaps) Reset() { *m = IntMaps{} }
|
||||||
|
func (m *IntMaps) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*IntMaps) ProtoMessage() {}
|
||||||
|
func (*IntMaps) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
|
||||||
|
|
||||||
|
func (m *IntMaps) GetMaps() []*IntMap {
|
||||||
|
if m != nil {
|
||||||
|
return m.Maps
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterType((*Message)(nil), "proto3_proto.Message")
|
||||||
|
proto.RegisterType((*Nested)(nil), "proto3_proto.Nested")
|
||||||
|
proto.RegisterType((*MessageWithMap)(nil), "proto3_proto.MessageWithMap")
|
||||||
|
proto.RegisterType((*IntMap)(nil), "proto3_proto.IntMap")
|
||||||
|
proto.RegisterType((*IntMaps)(nil), "proto3_proto.IntMaps")
|
||||||
|
proto.RegisterEnum("proto3_proto.Message_Humour", Message_Humour_name, Message_Humour_value)
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() { proto.RegisterFile("proto3_proto/proto3.proto", fileDescriptor0) }
|
||||||
|
|
||||||
|
var fileDescriptor0 = []byte{
|
||||||
|
// 733 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x53, 0x6d, 0x6f, 0xf3, 0x34,
|
||||||
|
0x14, 0x25, 0x4d, 0x5f, 0xd2, 0x9b, 0x74, 0x0b, 0x5e, 0x91, 0xbc, 0x02, 0x52, 0x28, 0x12, 0x8a,
|
||||||
|
0x78, 0x49, 0xa1, 0xd3, 0xd0, 0x84, 0x10, 0x68, 0x1b, 0x9b, 0xa8, 0xd6, 0x95, 0xca, 0xdd, 0x98,
|
||||||
|
0xf8, 0x14, 0xa5, 0xad, 0xdb, 0x46, 0x34, 0x4e, 0x49, 0x1c, 0xa4, 0xfc, 0x1d, 0xfe, 0x28, 0x8f,
|
||||||
|
0x6c, 0xa7, 0x5d, 0x36, 0x65, 0xcf, 0xf3, 0x29, 0xf6, 0xf1, 0xb9, 0xf7, 0x9c, 0x1c, 0x5f, 0xc3,
|
||||||
|
0xe9, 0x2e, 0x89, 0x79, 0x7c, 0xe6, 0xcb, 0xcf, 0x40, 0x6d, 0x3c, 0xf9, 0x41, 0x56, 0xf9, 0xa8,
|
||||||
|
0x77, 0xba, 0x8e, 0xe3, 0xf5, 0x96, 0x2a, 0xca, 0x3c, 0x5b, 0x0d, 0x02, 0x96, 0x2b, 0x62, 0xef,
|
||||||
|
0x84, 0xd3, 0x94, 0x2f, 0x03, 0x1e, 0x0c, 0xc4, 0x42, 0x81, 0xfd, 0xff, 0x5b, 0xd0, 0xba, 0xa7,
|
||||||
|
0x69, 0x1a, 0xac, 0x29, 0x42, 0x50, 0x67, 0x41, 0x44, 0xb1, 0xe6, 0x68, 0x6e, 0x9b, 0xc8, 0x35,
|
||||||
|
0xba, 0x00, 0x63, 0x13, 0x6e, 0x83, 0x24, 0xe4, 0x39, 0xae, 0x39, 0x9a, 0x7b, 0x34, 0xfc, 0xcc,
|
||||||
|
0x2b, 0x0b, 0x7a, 0x45, 0xb1, 0xf7, 0x7b, 0x16, 0xc5, 0x59, 0x42, 0x0e, 0x6c, 0xe4, 0x80, 0xb5,
|
||||||
|
0xa1, 0xe1, 0x7a, 0xc3, 0xfd, 0x90, 0xf9, 0x8b, 0x08, 0xeb, 0x8e, 0xe6, 0x76, 0x08, 0x28, 0x6c,
|
||||||
|
0xc4, 0xae, 0x23, 0xa1, 0x27, 0xec, 0xe0, 0xba, 0xa3, 0xb9, 0x16, 0x91, 0x6b, 0xf4, 0x05, 0x58,
|
||||||
|
0x09, 0x4d, 0xb3, 0x2d, 0xf7, 0x17, 0x71, 0xc6, 0x38, 0x6e, 0x39, 0x9a, 0xab, 0x13, 0x53, 0x61,
|
||||||
|
0xd7, 0x02, 0x42, 0x5f, 0x42, 0x87, 0x27, 0x19, 0xf5, 0xd3, 0x45, 0xcc, 0xd3, 0x28, 0x60, 0xd8,
|
||||||
|
0x70, 0x34, 0xd7, 0x20, 0x96, 0x00, 0x67, 0x05, 0x86, 0xba, 0xd0, 0x48, 0x17, 0x71, 0x42, 0x71,
|
||||||
|
0xdb, 0xd1, 0xdc, 0x1a, 0x51, 0x1b, 0x64, 0x83, 0xfe, 0x37, 0xcd, 0x71, 0xc3, 0xd1, 0xdd, 0x3a,
|
||||||
|
0x11, 0x4b, 0xf4, 0x29, 0xb4, 0xd3, 0x4d, 0x9c, 0x70, 0x5f, 0xe0, 0x27, 0x8e, 0xee, 0x36, 0x88,
|
||||||
|
0x21, 0x81, 0x3b, 0x9a, 0xa3, 0x6f, 0xa1, 0xc9, 0x68, 0xca, 0xe9, 0x12, 0x37, 0x1d, 0xcd, 0x35,
|
||||||
|
0x87, 0xdd, 0x97, 0xbf, 0x3e, 0x91, 0x67, 0xa4, 0xe0, 0xa0, 0x73, 0x68, 0x25, 0xfe, 0x2a, 0x63,
|
||||||
|
0x2c, 0xc7, 0xb6, 0xa3, 0x7f, 0x30, 0xa9, 0x66, 0x72, 0x2b, 0xb8, 0xe8, 0x67, 0x68, 0x71, 0x9a,
|
||||||
|
0x24, 0x41, 0xc8, 0x30, 0x38, 0xba, 0x6b, 0x0e, 0xfb, 0xd5, 0x65, 0x0f, 0x8a, 0x74, 0xc3, 0x78,
|
||||||
|
0x92, 0x93, 0x7d, 0x09, 0xba, 0x00, 0x75, 0xff, 0x43, 0x7f, 0x15, 0xd2, 0xed, 0x12, 0x9b, 0xd2,
|
||||||
|
0xe8, 0x27, 0xde, 0xfe, 0xae, 0xbd, 0x59, 0x36, 0xff, 0x8d, 0xae, 0x82, 0x6c, 0xcb, 0x53, 0x62,
|
||||||
|
0x2a, 0xea, 0xad, 0x60, 0xa2, 0xd1, 0xa1, 0xf2, 0xdf, 0x60, 0x9b, 0x51, 0xdc, 0x91, 0xe2, 0x5f,
|
||||||
|
0x55, 0x8b, 0x4f, 0x25, 0xf3, 0x4f, 0x41, 0x54, 0x06, 0x8a, 0x56, 0x12, 0x41, 0xdf, 0x83, 0x11,
|
||||||
|
0xb0, 0x9c, 0x6f, 0x42, 0xb6, 0xc6, 0x47, 0x45, 0x52, 0x6a, 0x0e, 0xbd, 0xfd, 0x1c, 0x7a, 0x97,
|
||||||
|
0x2c, 0x27, 0x07, 0x16, 0x3a, 0x07, 0x33, 0x0a, 0x58, 0xee, 0xcb, 0x5d, 0x8a, 0x8f, 0xa5, 0x76,
|
||||||
|
0x75, 0x11, 0x08, 0xe2, 0x83, 0xe4, 0xa1, 0x73, 0x80, 0x34, 0x9b, 0x47, 0xca, 0x14, 0xfe, 0xb8,
|
||||||
|
0xf8, 0xd7, 0x2a, 0xc7, 0xa4, 0x44, 0x44, 0x3f, 0x80, 0xb1, 0xd8, 0x84, 0xdb, 0x65, 0x42, 0x19,
|
||||||
|
0x46, 0x52, 0xea, 0x8d, 0xa2, 0x03, 0xad, 0x37, 0x05, 0xab, 0x1c, 0xf8, 0x7e, 0x72, 0xd4, 0xd3,
|
||||||
|
0x90, 0x93, 0xf3, 0x35, 0x34, 0x54, 0x70, 0xb5, 0xf7, 0xcc, 0x86, 0xa2, 0xfc, 0x54, 0xbb, 0xd0,
|
||||||
|
0x7a, 0x8f, 0x60, 0xbf, 0x4e, 0xb1, 0xa2, 0xeb, 0x37, 0x2f, 0xbb, 0xbe, 0x71, 0x91, 0xcf, 0x6d,
|
||||||
|
0xfb, 0xbf, 0x42, 0x53, 0x0d, 0x14, 0x32, 0xa1, 0xf5, 0x38, 0xb9, 0x9b, 0xfc, 0xf1, 0x34, 0xb1,
|
||||||
|
0x3f, 0x42, 0x06, 0xd4, 0xa7, 0x8f, 0x93, 0x99, 0xad, 0xa1, 0x0e, 0xb4, 0x67, 0xe3, 0xcb, 0xe9,
|
||||||
|
0xec, 0x61, 0x74, 0x7d, 0x67, 0xd7, 0xd0, 0x31, 0x98, 0x57, 0xa3, 0xf1, 0xd8, 0xbf, 0xba, 0x1c,
|
||||||
|
0x8d, 0x6f, 0xfe, 0xb2, 0xf5, 0xfe, 0x10, 0x9a, 0xca, 0xac, 0x78, 0x33, 0x73, 0x39, 0xbe, 0xca,
|
||||||
|
0x8f, 0xda, 0x88, 0x57, 0xba, 0xc8, 0xb8, 0x32, 0x64, 0x10, 0xb9, 0xee, 0xff, 0xa7, 0xc1, 0x51,
|
||||||
|
0x91, 0xd9, 0x53, 0xc8, 0x37, 0xf7, 0xc1, 0x0e, 0x4d, 0xc1, 0x9a, 0xe7, 0x9c, 0xfa, 0x51, 0xb0,
|
||||||
|
0xdb, 0x89, 0x39, 0xd0, 0x64, 0xce, 0xdf, 0x55, 0xe6, 0x5c, 0xd4, 0x78, 0x57, 0x39, 0xa7, 0xf7,
|
||||||
|
0x8a, 0x5f, 0x4c, 0xd5, 0xfc, 0x19, 0xe9, 0xfd, 0x02, 0xf6, 0x6b, 0x42, 0x39, 0x30, 0x43, 0x05,
|
||||||
|
0xd6, 0x2d, 0x07, 0x66, 0x95, 0x93, 0xf9, 0x07, 0x9a, 0x23, 0xc6, 0x85, 0xb7, 0x01, 0xe8, 0x09,
|
||||||
|
0xe7, 0x85, 0xa5, 0xcf, 0x5f, 0x5a, 0x52, 0x14, 0x8f, 0x70, 0xae, 0x2c, 0x08, 0x66, 0xef, 0x47,
|
||||||
|
0x30, 0xf6, 0x40, 0x59, 0xb2, 0x51, 0x21, 0xd9, 0x28, 0x4b, 0x9e, 0x41, 0x4b, 0xf5, 0x4b, 0x91,
|
||||||
|
0x0b, 0xf5, 0x28, 0xd8, 0xa5, 0x85, 0x68, 0xb7, 0x4a, 0x94, 0x48, 0xc6, 0xbc, 0xa9, 0x8e, 0xde,
|
||||||
|
0x05, 0x00, 0x00, 0xff, 0xff, 0x75, 0x38, 0xad, 0x84, 0xe4, 0x05, 0x00, 0x00,
|
||||||
|
}
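
The generated init functions above call RegisterType and RegisterEnum for each message and for Message_Humour. A hedged sketch of querying that registry through EnumValueMap; the import path simply follows the vendored file layout.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	// The blank import runs the generated init() above, which registers the
	// proto3_proto messages and the Message_Humour enum.
	_ "github.com/golang/protobuf/proto/proto3_proto"
)

func main() {
	values := proto.EnumValueMap("proto3_proto.Message_Humour")
	if values == nil {
		fmt.Println("enum not registered in this binary")
		return
	}
	fmt.Println(values["SLAPSTICK"]) // 2
}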
|
87  vendor/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto  (vendored, new file)
@@ -0,0 +1,87 @@
|
||||||
|
// Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
//
|
||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// https://github.com/golang/protobuf
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
syntax = "proto3";
|
||||||
|
|
||||||
|
import "google/protobuf/any.proto";
|
||||||
|
import "testdata/test.proto";
|
||||||
|
|
||||||
|
package proto3_proto;
|
||||||
|
|
||||||
|
message Message {
|
||||||
|
enum Humour {
|
||||||
|
UNKNOWN = 0;
|
||||||
|
PUNS = 1;
|
||||||
|
SLAPSTICK = 2;
|
||||||
|
BILL_BAILEY = 3;
|
||||||
|
}
|
||||||
|
|
||||||
|
string name = 1;
|
||||||
|
Humour hilarity = 2;
|
||||||
|
uint32 height_in_cm = 3;
|
||||||
|
bytes data = 4;
|
||||||
|
int64 result_count = 7;
|
||||||
|
bool true_scotsman = 8;
|
||||||
|
float score = 9;
|
||||||
|
|
||||||
|
repeated uint64 key = 5;
|
||||||
|
repeated int32 short_key = 19;
|
||||||
|
Nested nested = 6;
|
||||||
|
repeated Humour r_funny = 16;
|
||||||
|
|
||||||
|
map<string, Nested> terrain = 10;
|
||||||
|
testdata.SubDefaults proto2_field = 11;
|
||||||
|
map<string, testdata.SubDefaults> proto2_value = 13;
|
||||||
|
|
||||||
|
google.protobuf.Any anything = 14;
|
||||||
|
repeated google.protobuf.Any many_things = 15;
|
||||||
|
|
||||||
|
Message submessage = 17;
|
||||||
|
repeated Message children = 18;
|
||||||
|
}
|
||||||
|
|
||||||
|
message Nested {
|
||||||
|
string bunny = 1;
|
||||||
|
bool cute = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message MessageWithMap {
|
||||||
|
map<bool, bytes> byte_mapping = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
message IntMap {
|
||||||
|
map<int32, int32> rtt = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message IntMaps {
|
||||||
|
repeated IntMap maps = 1;
|
||||||
|
}
|
135  vendor/src/github.com/golang/protobuf/proto/proto3_test.go  (vendored, new file)
@@ -0,0 +1,135 @@
|
||||||
|
// Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
//
|
||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// https://github.com/golang/protobuf
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
package proto_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/golang/protobuf/proto"
|
||||||
|
pb "github.com/golang/protobuf/proto/proto3_proto"
|
||||||
|
tpb "github.com/golang/protobuf/proto/testdata"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestProto3ZeroValues(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
desc string
|
||||||
|
m proto.Message
|
||||||
|
}{
|
||||||
|
{"zero message", &pb.Message{}},
|
||||||
|
{"empty bytes field", &pb.Message{Data: []byte{}}},
|
||||||
|
}
|
||||||
|
for _, test := range tests {
|
||||||
|
b, err := proto.Marshal(test.m)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%s: proto.Marshal: %v", test.desc, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if len(b) > 0 {
|
||||||
|
t.Errorf("%s: Encoding is non-empty: %q", test.desc, b)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRoundTripProto3(t *testing.T) {
|
||||||
|
m := &pb.Message{
|
||||||
|
Name: "David", // (2 | 1<<3): 0x0a 0x05 "David"
|
||||||
|
Hilarity: pb.Message_PUNS, // (0 | 2<<3): 0x10 0x01
|
||||||
|
HeightInCm: 178, // (0 | 3<<3): 0x18 0xb2 0x01
|
||||||
|
Data: []byte("roboto"), // (2 | 4<<3): 0x20 0x06 "roboto"
|
||||||
|
ResultCount: 47, // (0 | 7<<3): 0x38 0x2f
|
||||||
|
TrueScotsman: true, // (0 | 8<<3): 0x40 0x01
|
||||||
|
Score: 8.1, // (5 | 9<<3): 0x4d <8.1>
|
||||||
|
|
||||||
|
Key: []uint64{1, 0xdeadbeef},
|
||||||
|
Nested: &pb.Nested{
|
||||||
|
Bunny: "Monty",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
t.Logf(" m: %v", m)
|
||||||
|
|
||||||
|
b, err := proto.Marshal(m)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("proto.Marshal: %v", err)
|
||||||
|
}
|
||||||
|
t.Logf(" b: %q", b)
|
||||||
|
|
||||||
|
m2 := new(pb.Message)
|
||||||
|
if err := proto.Unmarshal(b, m2); err != nil {
|
||||||
|
t.Fatalf("proto.Unmarshal: %v", err)
|
||||||
|
}
|
||||||
|
t.Logf("m2: %v", m2)
|
||||||
|
|
||||||
|
if !proto.Equal(m, m2) {
|
||||||
|
t.Errorf("proto.Equal returned false:\n m: %v\nm2: %v", m, m2)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGettersForBasicTypesExist(t *testing.T) {
|
||||||
|
var m pb.Message
|
||||||
|
if got := m.GetNested().GetBunny(); got != "" {
|
||||||
|
t.Errorf("m.GetNested().GetBunny() = %q, want empty string", got)
|
||||||
|
}
|
||||||
|
if got := m.GetNested().GetCute(); got {
|
||||||
|
t.Errorf("m.GetNested().GetCute() = %t, want false", got)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestProto3SetDefaults(t *testing.T) {
|
||||||
|
in := &pb.Message{
|
||||||
|
Terrain: map[string]*pb.Nested{
|
||||||
|
"meadow": new(pb.Nested),
|
||||||
|
},
|
||||||
|
Proto2Field: new(tpb.SubDefaults),
|
||||||
|
Proto2Value: map[string]*tpb.SubDefaults{
|
||||||
|
"badlands": new(tpb.SubDefaults),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
got := proto.Clone(in).(*pb.Message)
|
||||||
|
proto.SetDefaults(got)
|
||||||
|
|
||||||
|
// There are no defaults in proto3. Everything should be the zero value, but
|
||||||
|
// we need to remember to set defaults for nested proto2 messages.
|
||||||
|
want := &pb.Message{
|
||||||
|
Terrain: map[string]*pb.Nested{
|
||||||
|
"meadow": new(pb.Nested),
|
||||||
|
},
|
||||||
|
Proto2Field: &tpb.SubDefaults{N: proto.Int64(7)},
|
||||||
|
Proto2Value: map[string]*tpb.SubDefaults{
|
||||||
|
"badlands": &tpb.SubDefaults{N: proto.Int64(7)},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
if !proto.Equal(got, want) {
|
||||||
|
t.Errorf("with in = %v\nproto.SetDefaults(in) =>\ngot %v\nwant %v", in, got, want)
|
||||||
|
}
|
||||||
|
}
|
63  vendor/src/github.com/golang/protobuf/proto/size2_test.go  (vendored, new file)
@@ -0,0 +1,63 @@
|
||||||
|
// Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
//
|
||||||
|
// Copyright 2012 The Go Authors. All rights reserved.
|
||||||
|
// https://github.com/golang/protobuf
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
package proto
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
// This is a separate file and package from size_test.go because that one uses
|
||||||
|
// generated messages and thus may not be in package proto without having a circular
|
||||||
|
// dependency, whereas this file tests unexported details of size.go.
|
||||||
|
|
||||||
|
func TestVarintSize(t *testing.T) {
|
||||||
|
// Check the edge cases carefully.
|
||||||
|
testCases := []struct {
|
||||||
|
n uint64
|
||||||
|
size int
|
||||||
|
}{
|
||||||
|
{0, 1},
|
||||||
|
{1, 1},
|
||||||
|
{127, 1},
|
||||||
|
{128, 2},
|
||||||
|
{16383, 2},
|
||||||
|
{16384, 3},
|
||||||
|
{1<<63 - 1, 9},
|
||||||
|
{1 << 63, 10},
|
||||||
|
}
|
||||||
|
for _, tc := range testCases {
|
||||||
|
size := sizeVarint(tc.n)
|
||||||
|
if size != tc.size {
|
||||||
|
t.Errorf("sizeVarint(%d) = %d, want %d", tc.n, size, tc.size)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
164  vendor/src/github.com/golang/protobuf/proto/size_test.go  (vendored, new file)
@@ -0,0 +1,164 @@
|
||||||
|
// Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
//
|
||||||
|
// Copyright 2012 The Go Authors. All rights reserved.
|
||||||
|
// https://github.com/golang/protobuf
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
package proto_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
. "github.com/golang/protobuf/proto"
|
||||||
|
proto3pb "github.com/golang/protobuf/proto/proto3_proto"
|
||||||
|
pb "github.com/golang/protobuf/proto/testdata"
|
||||||
|
)
|
||||||
|
|
||||||
|
var messageWithExtension1 = &pb.MyMessage{Count: Int32(7)}
|
||||||
|
|
||||||
|
// messageWithExtension2 is in equal_test.go.
|
||||||
|
var messageWithExtension3 = &pb.MyMessage{Count: Int32(8)}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
if err := SetExtension(messageWithExtension1, pb.E_Ext_More, &pb.Ext{Data: String("Abbott")}); err != nil {
|
||||||
|
log.Panicf("SetExtension: %v", err)
|
||||||
|
}
|
||||||
|
if err := SetExtension(messageWithExtension3, pb.E_Ext_More, &pb.Ext{Data: String("Costello")}); err != nil {
|
||||||
|
log.Panicf("SetExtension: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Force messageWithExtension3 to have the extension encoded.
|
||||||
|
Marshal(messageWithExtension3)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
var SizeTests = []struct {
|
||||||
|
desc string
|
||||||
|
pb Message
|
||||||
|
}{
|
||||||
|
{"empty", &pb.OtherMessage{}},
|
||||||
|
// Basic types.
|
||||||
|
{"bool", &pb.Defaults{F_Bool: Bool(true)}},
|
||||||
|
{"int32", &pb.Defaults{F_Int32: Int32(12)}},
|
||||||
|
{"negative int32", &pb.Defaults{F_Int32: Int32(-1)}},
|
||||||
|
{"small int64", &pb.Defaults{F_Int64: Int64(1)}},
|
||||||
|
{"big int64", &pb.Defaults{F_Int64: Int64(1 << 20)}},
|
||||||
|
{"negative int64", &pb.Defaults{F_Int64: Int64(-1)}},
|
||||||
|
{"fixed32", &pb.Defaults{F_Fixed32: Uint32(71)}},
|
||||||
|
{"fixed64", &pb.Defaults{F_Fixed64: Uint64(72)}},
|
||||||
|
{"uint32", &pb.Defaults{F_Uint32: Uint32(123)}},
|
||||||
|
{"uint64", &pb.Defaults{F_Uint64: Uint64(124)}},
|
||||||
|
{"float", &pb.Defaults{F_Float: Float32(12.6)}},
|
||||||
|
{"double", &pb.Defaults{F_Double: Float64(13.9)}},
|
||||||
|
{"string", &pb.Defaults{F_String: String("niles")}},
|
||||||
|
{"bytes", &pb.Defaults{F_Bytes: []byte("wowsa")}},
|
||||||
|
{"bytes, empty", &pb.Defaults{F_Bytes: []byte{}}},
|
||||||
|
{"sint32", &pb.Defaults{F_Sint32: Int32(65)}},
|
||||||
|
{"sint64", &pb.Defaults{F_Sint64: Int64(67)}},
|
||||||
|
{"enum", &pb.Defaults{F_Enum: pb.Defaults_BLUE.Enum()}},
|
||||||
|
// Repeated.
|
||||||
|
{"empty repeated bool", &pb.MoreRepeated{Bools: []bool{}}},
|
||||||
|
{"repeated bool", &pb.MoreRepeated{Bools: []bool{false, true, true, false}}},
|
||||||
|
{"packed repeated bool", &pb.MoreRepeated{BoolsPacked: []bool{false, true, true, false, true, true, true}}},
|
||||||
|
{"repeated int32", &pb.MoreRepeated{Ints: []int32{1, 12203, 1729, -1}}},
|
||||||
|
{"repeated int32 packed", &pb.MoreRepeated{IntsPacked: []int32{1, 12203, 1729}}},
|
||||||
|
{"repeated int64 packed", &pb.MoreRepeated{Int64SPacked: []int64{
|
||||||
|
// Need enough large numbers to verify that the header is counting the number of bytes
|
||||||
|
// for the field, not the number of elements.
|
||||||
|
1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62,
|
||||||
|
1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62,
|
||||||
|
}}},
|
||||||
|
{"repeated string", &pb.MoreRepeated{Strings: []string{"r", "ken", "gri"}}},
|
||||||
|
{"repeated fixed", &pb.MoreRepeated{Fixeds: []uint32{1, 2, 3, 4}}},
|
||||||
|
// Nested.
|
||||||
|
{"nested", &pb.OldMessage{Nested: &pb.OldMessage_Nested{Name: String("whatever")}}},
|
||||||
|
{"group", &pb.GroupOld{G: &pb.GroupOld_G{X: Int32(12345)}}},
|
||||||
|
// Other things.
|
||||||
|
{"unrecognized", &pb.MoreRepeated{XXX_unrecognized: []byte{13<<3 | 0, 4}}},
|
||||||
|
{"extension (unencoded)", messageWithExtension1},
|
||||||
|
{"extension (encoded)", messageWithExtension3},
|
||||||
|
// proto3 message
|
||||||
|
{"proto3 empty", &proto3pb.Message{}},
|
||||||
|
{"proto3 bool", &proto3pb.Message{TrueScotsman: true}},
|
||||||
|
{"proto3 int64", &proto3pb.Message{ResultCount: 1}},
|
||||||
|
{"proto3 uint32", &proto3pb.Message{HeightInCm: 123}},
|
||||||
|
{"proto3 float", &proto3pb.Message{Score: 12.6}},
|
||||||
|
{"proto3 string", &proto3pb.Message{Name: "Snezana"}},
|
||||||
|
{"proto3 bytes", &proto3pb.Message{Data: []byte("wowsa")}},
|
||||||
|
{"proto3 bytes, empty", &proto3pb.Message{Data: []byte{}}},
|
||||||
|
{"proto3 enum", &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}},
|
||||||
|
{"proto3 map field with empty bytes", &proto3pb.MessageWithMap{ByteMapping: map[bool][]byte{false: []byte{}}}},
|
||||||
|
|
||||||
|
{"map field", &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob", 7: "Andrew"}}},
|
||||||
|
{"map field with message", &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{0x7001: &pb.FloatingPoint{F: Float64(2.0)}}}},
|
||||||
|
{"map field with bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte("this time for sure")}}},
|
||||||
|
{"map field with empty bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte{}}}},
|
||||||
|
|
||||||
|
{"map field with big entry", &pb.MessageWithMap{NameMapping: map[int32]string{8: strings.Repeat("x", 125)}}},
|
||||||
|
{"map field with big key and val", &pb.MessageWithMap{StrToStr: map[string]string{strings.Repeat("x", 70): strings.Repeat("y", 70)}}},
|
||||||
|
{"map field with big numeric key", &pb.MessageWithMap{NameMapping: map[int32]string{0xf00d: "om nom nom"}}},
|
||||||
|
|
||||||
|
{"oneof not set", &pb.Oneof{}},
|
||||||
|
{"oneof bool", &pb.Oneof{Union: &pb.Oneof_F_Bool{true}}},
|
||||||
|
{"oneof zero int32", &pb.Oneof{Union: &pb.Oneof_F_Int32{0}}},
|
||||||
|
{"oneof big int32", &pb.Oneof{Union: &pb.Oneof_F_Int32{1 << 20}}},
|
||||||
|
{"oneof int64", &pb.Oneof{Union: &pb.Oneof_F_Int64{42}}},
|
||||||
|
{"oneof fixed32", &pb.Oneof{Union: &pb.Oneof_F_Fixed32{43}}},
|
||||||
|
{"oneof fixed64", &pb.Oneof{Union: &pb.Oneof_F_Fixed64{44}}},
|
||||||
|
{"oneof uint32", &pb.Oneof{Union: &pb.Oneof_F_Uint32{45}}},
|
||||||
|
{"oneof uint64", &pb.Oneof{Union: &pb.Oneof_F_Uint64{46}}},
|
||||||
|
{"oneof float", &pb.Oneof{Union: &pb.Oneof_F_Float{47.1}}},
|
||||||
|
{"oneof double", &pb.Oneof{Union: &pb.Oneof_F_Double{48.9}}},
|
||||||
|
{"oneof string", &pb.Oneof{Union: &pb.Oneof_F_String{"Rhythmic Fman"}}},
|
||||||
|
{"oneof bytes", &pb.Oneof{Union: &pb.Oneof_F_Bytes{[]byte("let go")}}},
|
||||||
|
{"oneof sint32", &pb.Oneof{Union: &pb.Oneof_F_Sint32{50}}},
|
||||||
|
{"oneof sint64", &pb.Oneof{Union: &pb.Oneof_F_Sint64{51}}},
|
||||||
|
{"oneof enum", &pb.Oneof{Union: &pb.Oneof_F_Enum{pb.MyMessage_BLUE}}},
|
||||||
|
{"message for oneof", &pb.GoTestField{Label: String("k"), Type: String("v")}},
|
||||||
|
{"oneof message", &pb.Oneof{Union: &pb.Oneof_F_Message{&pb.GoTestField{Label: String("k"), Type: String("v")}}}},
|
||||||
|
{"oneof group", &pb.Oneof{Union: &pb.Oneof_FGroup{&pb.Oneof_F_Group{X: Int32(52)}}}},
|
||||||
|
{"oneof largest tag", &pb.Oneof{Union: &pb.Oneof_F_Largest_Tag{1}}},
|
||||||
|
{"multiple oneofs", &pb.Oneof{Union: &pb.Oneof_F_Int32{1}, Tormato: &pb.Oneof_Value{2}}},
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSize(t *testing.T) {
|
||||||
|
for _, tc := range SizeTests {
|
||||||
|
size := Size(tc.pb)
|
||||||
|
b, err := Marshal(tc.pb)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%v: Marshal failed: %v", tc.desc, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if size != len(b) {
|
||||||
|
t.Errorf("%v: Size(%v) = %d, want %d", tc.desc, tc.pb, size, len(b))
|
||||||
|
t.Logf("%v: bytes: %#v", tc.desc, b)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
50
vendor/src/github.com/golang/protobuf/proto/testdata/Makefile
vendored
Normal file
50
vendor/src/github.com/golang/protobuf/proto/testdata/Makefile
vendored
Normal file
|
@ -0,0 +1,50 @@
|
||||||
|
# Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
#
|
||||||
|
# Copyright 2010 The Go Authors. All rights reserved.
|
||||||
|
# https://github.com/golang/protobuf
|
||||||
|
#
|
||||||
|
# Redistribution and use in source and binary forms, with or without
|
||||||
|
# modification, are permitted provided that the following conditions are
|
||||||
|
# met:
|
||||||
|
#
|
||||||
|
# * Redistributions of source code must retain the above copyright
|
||||||
|
# notice, this list of conditions and the following disclaimer.
|
||||||
|
# * Redistributions in binary form must reproduce the above
|
||||||
|
# copyright notice, this list of conditions and the following disclaimer
|
||||||
|
# in the documentation and/or other materials provided with the
|
||||||
|
# distribution.
|
||||||
|
# * Neither the name of Google Inc. nor the names of its
|
||||||
|
# contributors may be used to endorse or promote products derived from
|
||||||
|
# this software without specific prior written permission.
|
||||||
|
#
|
||||||
|
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
|
||||||
|
include ../../Make.protobuf
|
||||||
|
|
||||||
|
all: regenerate
|
||||||
|
|
||||||
|
regenerate:
|
||||||
|
rm -f test.pb.go
|
||||||
|
make test.pb.go
|
||||||
|
|
||||||
|
# The following rules are just aids to development. Not needed for typical testing.
|
||||||
|
|
||||||
|
diff: regenerate
|
||||||
|
git diff test.pb.go
|
||||||
|
|
||||||
|
restore:
|
||||||
|
cp test.pb.go.golden test.pb.go
|
||||||
|
|
||||||
|
preserve:
|
||||||
|
cp test.pb.go test.pb.go.golden
|
86
vendor/src/github.com/golang/protobuf/proto/testdata/golden_test.go
vendored
Normal file
86
vendor/src/github.com/golang/protobuf/proto/testdata/golden_test.go
vendored
Normal file
|
@ -0,0 +1,86 @@
|
||||||
|
// Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
//
|
||||||
|
// Copyright 2012 The Go Authors. All rights reserved.
|
||||||
|
// https://github.com/golang/protobuf
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
// Verify that the compiler output for test.proto is unchanged.
|
||||||
|
|
||||||
|
package testdata
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/sha1"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
// sum returns in string form (for easy comparison) the SHA-1 hash of the named file.
|
||||||
|
func sum(t *testing.T, name string) string {
|
||||||
|
data, err := ioutil.ReadFile(name)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
t.Logf("sum(%q): length is %d", name, len(data))
|
||||||
|
hash := sha1.New()
|
||||||
|
_, err = hash.Write(data)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("% x", hash.Sum(nil))
|
||||||
|
}
|
||||||
|
|
||||||
|
func run(t *testing.T, name string, args ...string) {
|
||||||
|
cmd := exec.Command(name, args...)
|
||||||
|
cmd.Stdin = os.Stdin
|
||||||
|
cmd.Stdout = os.Stdout
|
||||||
|
cmd.Stderr = os.Stderr
|
||||||
|
err := cmd.Run()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGolden(t *testing.T) {
|
||||||
|
// Compute the original checksum.
|
||||||
|
goldenSum := sum(t, "test.pb.go")
|
||||||
|
// Run the proto compiler.
|
||||||
|
run(t, "protoc", "--go_out="+os.TempDir(), "test.proto")
|
||||||
|
newFile := filepath.Join(os.TempDir(), "test.pb.go")
|
||||||
|
defer os.Remove(newFile)
|
||||||
|
// Compute the new checksum.
|
||||||
|
newSum := sum(t, newFile)
|
||||||
|
// Verify
|
||||||
|
if newSum != goldenSum {
|
||||||
|
run(t, "diff", "-u", "test.pb.go", newFile)
|
||||||
|
t.Fatal("Code generated by protoc-gen-go has changed; update test.pb.go")
|
||||||
|
}
|
||||||
|
}
|
4148
vendor/src/github.com/golang/protobuf/proto/testdata/test.pb.go
vendored
Normal file
4148
vendor/src/github.com/golang/protobuf/proto/testdata/test.pb.go
vendored
Normal file
File diff suppressed because it is too large
Load diff
548
vendor/src/github.com/golang/protobuf/proto/testdata/test.proto
vendored
Normal file
548
vendor/src/github.com/golang/protobuf/proto/testdata/test.proto
vendored
Normal file
|
@ -0,0 +1,548 @@
|
||||||
|
// Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
//
|
||||||
|
// Copyright 2010 The Go Authors. All rights reserved.
|
||||||
|
// https://github.com/golang/protobuf
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
// A feature-rich test file for the protocol compiler and libraries.
|
||||||
|
|
||||||
|
syntax = "proto2";
|
||||||
|
|
||||||
|
package testdata;
|
||||||
|
|
||||||
|
enum FOO { FOO1 = 1; };
|
||||||
|
|
||||||
|
message GoEnum {
|
||||||
|
required FOO foo = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message GoTestField {
|
||||||
|
required string Label = 1;
|
||||||
|
required string Type = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message GoTest {
|
||||||
|
// An enum, for completeness.
|
||||||
|
enum KIND {
|
||||||
|
VOID = 0;
|
||||||
|
|
||||||
|
// Basic types
|
||||||
|
BOOL = 1;
|
||||||
|
BYTES = 2;
|
||||||
|
FINGERPRINT = 3;
|
||||||
|
FLOAT = 4;
|
||||||
|
INT = 5;
|
||||||
|
STRING = 6;
|
||||||
|
TIME = 7;
|
||||||
|
|
||||||
|
// Groupings
|
||||||
|
TUPLE = 8;
|
||||||
|
ARRAY = 9;
|
||||||
|
MAP = 10;
|
||||||
|
|
||||||
|
// Table types
|
||||||
|
TABLE = 11;
|
||||||
|
|
||||||
|
// Functions
|
||||||
|
FUNCTION = 12; // last tag
|
||||||
|
};
|
||||||
|
|
||||||
|
// Some typical parameters
|
||||||
|
required KIND Kind = 1;
|
||||||
|
optional string Table = 2;
|
||||||
|
optional int32 Param = 3;
|
||||||
|
|
||||||
|
// Required, repeated and optional foreign fields.
|
||||||
|
required GoTestField RequiredField = 4;
|
||||||
|
repeated GoTestField RepeatedField = 5;
|
||||||
|
optional GoTestField OptionalField = 6;
|
||||||
|
|
||||||
|
// Required fields of all basic types
|
||||||
|
required bool F_Bool_required = 10;
|
||||||
|
required int32 F_Int32_required = 11;
|
||||||
|
required int64 F_Int64_required = 12;
|
||||||
|
required fixed32 F_Fixed32_required = 13;
|
||||||
|
required fixed64 F_Fixed64_required = 14;
|
||||||
|
required uint32 F_Uint32_required = 15;
|
||||||
|
required uint64 F_Uint64_required = 16;
|
||||||
|
required float F_Float_required = 17;
|
||||||
|
required double F_Double_required = 18;
|
||||||
|
required string F_String_required = 19;
|
||||||
|
required bytes F_Bytes_required = 101;
|
||||||
|
required sint32 F_Sint32_required = 102;
|
||||||
|
required sint64 F_Sint64_required = 103;
|
||||||
|
|
||||||
|
// Repeated fields of all basic types
|
||||||
|
repeated bool F_Bool_repeated = 20;
|
||||||
|
repeated int32 F_Int32_repeated = 21;
|
||||||
|
repeated int64 F_Int64_repeated = 22;
|
||||||
|
repeated fixed32 F_Fixed32_repeated = 23;
|
||||||
|
repeated fixed64 F_Fixed64_repeated = 24;
|
||||||
|
repeated uint32 F_Uint32_repeated = 25;
|
||||||
|
repeated uint64 F_Uint64_repeated = 26;
|
||||||
|
repeated float F_Float_repeated = 27;
|
||||||
|
repeated double F_Double_repeated = 28;
|
||||||
|
repeated string F_String_repeated = 29;
|
||||||
|
repeated bytes F_Bytes_repeated = 201;
|
||||||
|
repeated sint32 F_Sint32_repeated = 202;
|
||||||
|
repeated sint64 F_Sint64_repeated = 203;
|
||||||
|
|
||||||
|
// Optional fields of all basic types
|
||||||
|
optional bool F_Bool_optional = 30;
|
||||||
|
optional int32 F_Int32_optional = 31;
|
||||||
|
optional int64 F_Int64_optional = 32;
|
||||||
|
optional fixed32 F_Fixed32_optional = 33;
|
||||||
|
optional fixed64 F_Fixed64_optional = 34;
|
||||||
|
optional uint32 F_Uint32_optional = 35;
|
||||||
|
optional uint64 F_Uint64_optional = 36;
|
||||||
|
optional float F_Float_optional = 37;
|
||||||
|
optional double F_Double_optional = 38;
|
||||||
|
optional string F_String_optional = 39;
|
||||||
|
optional bytes F_Bytes_optional = 301;
|
||||||
|
optional sint32 F_Sint32_optional = 302;
|
||||||
|
optional sint64 F_Sint64_optional = 303;
|
||||||
|
|
||||||
|
// Default-valued fields of all basic types
|
||||||
|
optional bool F_Bool_defaulted = 40 [default=true];
|
||||||
|
optional int32 F_Int32_defaulted = 41 [default=32];
|
||||||
|
optional int64 F_Int64_defaulted = 42 [default=64];
|
||||||
|
optional fixed32 F_Fixed32_defaulted = 43 [default=320];
|
||||||
|
optional fixed64 F_Fixed64_defaulted = 44 [default=640];
|
||||||
|
optional uint32 F_Uint32_defaulted = 45 [default=3200];
|
||||||
|
optional uint64 F_Uint64_defaulted = 46 [default=6400];
|
||||||
|
optional float F_Float_defaulted = 47 [default=314159.];
|
||||||
|
optional double F_Double_defaulted = 48 [default=271828.];
|
||||||
|
optional string F_String_defaulted = 49 [default="hello, \"world!\"\n"];
|
||||||
|
optional bytes F_Bytes_defaulted = 401 [default="Bignose"];
|
||||||
|
optional sint32 F_Sint32_defaulted = 402 [default = -32];
|
||||||
|
optional sint64 F_Sint64_defaulted = 403 [default = -64];
|
||||||
|
|
||||||
|
// Packed repeated fields (no string or bytes).
|
||||||
|
repeated bool F_Bool_repeated_packed = 50 [packed=true];
|
||||||
|
repeated int32 F_Int32_repeated_packed = 51 [packed=true];
|
||||||
|
repeated int64 F_Int64_repeated_packed = 52 [packed=true];
|
||||||
|
repeated fixed32 F_Fixed32_repeated_packed = 53 [packed=true];
|
||||||
|
repeated fixed64 F_Fixed64_repeated_packed = 54 [packed=true];
|
||||||
|
repeated uint32 F_Uint32_repeated_packed = 55 [packed=true];
|
||||||
|
repeated uint64 F_Uint64_repeated_packed = 56 [packed=true];
|
||||||
|
repeated float F_Float_repeated_packed = 57 [packed=true];
|
||||||
|
repeated double F_Double_repeated_packed = 58 [packed=true];
|
||||||
|
repeated sint32 F_Sint32_repeated_packed = 502 [packed=true];
|
||||||
|
repeated sint64 F_Sint64_repeated_packed = 503 [packed=true];
|
||||||
|
|
||||||
|
// Required, repeated, and optional groups.
|
||||||
|
required group RequiredGroup = 70 {
|
||||||
|
required string RequiredField = 71;
|
||||||
|
};
|
||||||
|
|
||||||
|
repeated group RepeatedGroup = 80 {
|
||||||
|
required string RequiredField = 81;
|
||||||
|
};
|
||||||
|
|
||||||
|
optional group OptionalGroup = 90 {
|
||||||
|
required string RequiredField = 91;
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
// For testing a group containing a required field.
|
||||||
|
message GoTestRequiredGroupField {
|
||||||
|
required group Group = 1 {
|
||||||
|
required int32 Field = 2;
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
// For testing skipping of unrecognized fields.
|
||||||
|
// Numbers are all big, larger than tag numbers in GoTestField,
|
||||||
|
// the message used in the corresponding test.
|
||||||
|
message GoSkipTest {
|
||||||
|
required int32 skip_int32 = 11;
|
||||||
|
required fixed32 skip_fixed32 = 12;
|
||||||
|
required fixed64 skip_fixed64 = 13;
|
||||||
|
required string skip_string = 14;
|
||||||
|
required group SkipGroup = 15 {
|
||||||
|
required int32 group_int32 = 16;
|
||||||
|
required string group_string = 17;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// For testing packed/non-packed decoder switching.
|
||||||
|
// A serialized instance of one should be deserializable as the other.
|
||||||
|
message NonPackedTest {
|
||||||
|
repeated int32 a = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message PackedTest {
|
||||||
|
repeated int32 b = 1 [packed=true];
|
||||||
|
}
|
||||||
|
|
||||||
|
message MaxTag {
|
||||||
|
// Maximum possible tag number.
|
||||||
|
optional string last_field = 536870911;
|
||||||
|
}
|
||||||
|
|
||||||
|
message OldMessage {
|
||||||
|
message Nested {
|
||||||
|
optional string name = 1;
|
||||||
|
}
|
||||||
|
optional Nested nested = 1;
|
||||||
|
|
||||||
|
optional int32 num = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMessage is wire compatible with OldMessage;
|
||||||
|
// imagine it as a future version.
|
||||||
|
message NewMessage {
|
||||||
|
message Nested {
|
||||||
|
optional string name = 1;
|
||||||
|
optional string food_group = 2;
|
||||||
|
}
|
||||||
|
optional Nested nested = 1;
|
||||||
|
|
||||||
|
// This is an int32 in OldMessage.
|
||||||
|
optional int64 num = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Smaller tests for ASCII formatting.
|
||||||
|
|
||||||
|
message InnerMessage {
|
||||||
|
required string host = 1;
|
||||||
|
optional int32 port = 2 [default=4000];
|
||||||
|
optional bool connected = 3;
|
||||||
|
}
|
||||||
|
|
||||||
|
message OtherMessage {
|
||||||
|
optional int64 key = 1;
|
||||||
|
optional bytes value = 2;
|
||||||
|
optional float weight = 3;
|
||||||
|
optional InnerMessage inner = 4;
|
||||||
|
|
||||||
|
extensions 100 to max;
|
||||||
|
}
|
||||||
|
|
||||||
|
message RequiredInnerMessage {
|
||||||
|
required InnerMessage leo_finally_won_an_oscar = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message MyMessage {
|
||||||
|
required int32 count = 1;
|
||||||
|
optional string name = 2;
|
||||||
|
optional string quote = 3;
|
||||||
|
repeated string pet = 4;
|
||||||
|
optional InnerMessage inner = 5;
|
||||||
|
repeated OtherMessage others = 6;
|
||||||
|
optional RequiredInnerMessage we_must_go_deeper = 13;
|
||||||
|
repeated InnerMessage rep_inner = 12;
|
||||||
|
|
||||||
|
enum Color {
|
||||||
|
RED = 0;
|
||||||
|
GREEN = 1;
|
||||||
|
BLUE = 2;
|
||||||
|
};
|
||||||
|
optional Color bikeshed = 7;
|
||||||
|
|
||||||
|
optional group SomeGroup = 8 {
|
||||||
|
optional int32 group_field = 9;
|
||||||
|
}
|
||||||
|
|
||||||
|
// This field becomes [][]byte in the generated code.
|
||||||
|
repeated bytes rep_bytes = 10;
|
||||||
|
|
||||||
|
optional double bigfloat = 11;
|
||||||
|
|
||||||
|
extensions 100 to max;
|
||||||
|
}
|
||||||
|
|
||||||
|
message Ext {
|
||||||
|
extend MyMessage {
|
||||||
|
optional Ext more = 103;
|
||||||
|
optional string text = 104;
|
||||||
|
optional int32 number = 105;
|
||||||
|
}
|
||||||
|
|
||||||
|
optional string data = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
extend MyMessage {
|
||||||
|
repeated string greeting = 106;
|
||||||
|
}
|
||||||
|
|
||||||
|
message ComplexExtension {
|
||||||
|
optional int32 first = 1;
|
||||||
|
optional int32 second = 2;
|
||||||
|
repeated int32 third = 3;
|
||||||
|
}
|
||||||
|
|
||||||
|
extend OtherMessage {
|
||||||
|
optional ComplexExtension complex = 200;
|
||||||
|
repeated ComplexExtension r_complex = 201;
|
||||||
|
}
|
||||||
|
|
||||||
|
message DefaultsMessage {
|
||||||
|
enum DefaultsEnum {
|
||||||
|
ZERO = 0;
|
||||||
|
ONE = 1;
|
||||||
|
TWO = 2;
|
||||||
|
};
|
||||||
|
extensions 100 to max;
|
||||||
|
}
|
||||||
|
|
||||||
|
extend DefaultsMessage {
|
||||||
|
optional double no_default_double = 101;
|
||||||
|
optional float no_default_float = 102;
|
||||||
|
optional int32 no_default_int32 = 103;
|
||||||
|
optional int64 no_default_int64 = 104;
|
||||||
|
optional uint32 no_default_uint32 = 105;
|
||||||
|
optional uint64 no_default_uint64 = 106;
|
||||||
|
optional sint32 no_default_sint32 = 107;
|
||||||
|
optional sint64 no_default_sint64 = 108;
|
||||||
|
optional fixed32 no_default_fixed32 = 109;
|
||||||
|
optional fixed64 no_default_fixed64 = 110;
|
||||||
|
optional sfixed32 no_default_sfixed32 = 111;
|
||||||
|
optional sfixed64 no_default_sfixed64 = 112;
|
||||||
|
optional bool no_default_bool = 113;
|
||||||
|
optional string no_default_string = 114;
|
||||||
|
optional bytes no_default_bytes = 115;
|
||||||
|
optional DefaultsMessage.DefaultsEnum no_default_enum = 116;
|
||||||
|
|
||||||
|
optional double default_double = 201 [default = 3.1415];
|
||||||
|
optional float default_float = 202 [default = 3.14];
|
||||||
|
optional int32 default_int32 = 203 [default = 42];
|
||||||
|
optional int64 default_int64 = 204 [default = 43];
|
||||||
|
optional uint32 default_uint32 = 205 [default = 44];
|
||||||
|
optional uint64 default_uint64 = 206 [default = 45];
|
||||||
|
optional sint32 default_sint32 = 207 [default = 46];
|
||||||
|
optional sint64 default_sint64 = 208 [default = 47];
|
||||||
|
optional fixed32 default_fixed32 = 209 [default = 48];
|
||||||
|
optional fixed64 default_fixed64 = 210 [default = 49];
|
||||||
|
optional sfixed32 default_sfixed32 = 211 [default = 50];
|
||||||
|
optional sfixed64 default_sfixed64 = 212 [default = 51];
|
||||||
|
optional bool default_bool = 213 [default = true];
|
||||||
|
optional string default_string = 214 [default = "Hello, string"];
|
||||||
|
optional bytes default_bytes = 215 [default = "Hello, bytes"];
|
||||||
|
optional DefaultsMessage.DefaultsEnum default_enum = 216 [default = ONE];
|
||||||
|
}
|
||||||
|
|
||||||
|
message MyMessageSet {
|
||||||
|
option message_set_wire_format = true;
|
||||||
|
extensions 100 to max;
|
||||||
|
}
|
||||||
|
|
||||||
|
message Empty {
|
||||||
|
}
|
||||||
|
|
||||||
|
extend MyMessageSet {
|
||||||
|
optional Empty x201 = 201;
|
||||||
|
optional Empty x202 = 202;
|
||||||
|
optional Empty x203 = 203;
|
||||||
|
optional Empty x204 = 204;
|
||||||
|
optional Empty x205 = 205;
|
||||||
|
optional Empty x206 = 206;
|
||||||
|
optional Empty x207 = 207;
|
||||||
|
optional Empty x208 = 208;
|
||||||
|
optional Empty x209 = 209;
|
||||||
|
optional Empty x210 = 210;
|
||||||
|
optional Empty x211 = 211;
|
||||||
|
optional Empty x212 = 212;
|
||||||
|
optional Empty x213 = 213;
|
||||||
|
optional Empty x214 = 214;
|
||||||
|
optional Empty x215 = 215;
|
||||||
|
optional Empty x216 = 216;
|
||||||
|
optional Empty x217 = 217;
|
||||||
|
optional Empty x218 = 218;
|
||||||
|
optional Empty x219 = 219;
|
||||||
|
optional Empty x220 = 220;
|
||||||
|
optional Empty x221 = 221;
|
||||||
|
optional Empty x222 = 222;
|
||||||
|
optional Empty x223 = 223;
|
||||||
|
optional Empty x224 = 224;
|
||||||
|
optional Empty x225 = 225;
|
||||||
|
optional Empty x226 = 226;
|
||||||
|
optional Empty x227 = 227;
|
||||||
|
optional Empty x228 = 228;
|
||||||
|
optional Empty x229 = 229;
|
||||||
|
optional Empty x230 = 230;
|
||||||
|
optional Empty x231 = 231;
|
||||||
|
optional Empty x232 = 232;
|
||||||
|
optional Empty x233 = 233;
|
||||||
|
optional Empty x234 = 234;
|
||||||
|
optional Empty x235 = 235;
|
||||||
|
optional Empty x236 = 236;
|
||||||
|
optional Empty x237 = 237;
|
||||||
|
optional Empty x238 = 238;
|
||||||
|
optional Empty x239 = 239;
|
||||||
|
optional Empty x240 = 240;
|
||||||
|
optional Empty x241 = 241;
|
||||||
|
optional Empty x242 = 242;
|
||||||
|
optional Empty x243 = 243;
|
||||||
|
optional Empty x244 = 244;
|
||||||
|
optional Empty x245 = 245;
|
||||||
|
optional Empty x246 = 246;
|
||||||
|
optional Empty x247 = 247;
|
||||||
|
optional Empty x248 = 248;
|
||||||
|
optional Empty x249 = 249;
|
||||||
|
optional Empty x250 = 250;
|
||||||
|
}
|
||||||
|
|
||||||
|
message MessageList {
|
||||||
|
repeated group Message = 1 {
|
||||||
|
required string name = 2;
|
||||||
|
required int32 count = 3;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
message Strings {
|
||||||
|
optional string string_field = 1;
|
||||||
|
optional bytes bytes_field = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message Defaults {
|
||||||
|
enum Color {
|
||||||
|
RED = 0;
|
||||||
|
GREEN = 1;
|
||||||
|
BLUE = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Default-valued fields of all basic types.
|
||||||
|
// Same as GoTest, but copied here to make testing easier.
|
||||||
|
optional bool F_Bool = 1 [default=true];
|
||||||
|
optional int32 F_Int32 = 2 [default=32];
|
||||||
|
optional int64 F_Int64 = 3 [default=64];
|
||||||
|
optional fixed32 F_Fixed32 = 4 [default=320];
|
||||||
|
optional fixed64 F_Fixed64 = 5 [default=640];
|
||||||
|
optional uint32 F_Uint32 = 6 [default=3200];
|
||||||
|
optional uint64 F_Uint64 = 7 [default=6400];
|
||||||
|
optional float F_Float = 8 [default=314159.];
|
||||||
|
optional double F_Double = 9 [default=271828.];
|
||||||
|
optional string F_String = 10 [default="hello, \"world!\"\n"];
|
||||||
|
optional bytes F_Bytes = 11 [default="Bignose"];
|
||||||
|
optional sint32 F_Sint32 = 12 [default=-32];
|
||||||
|
optional sint64 F_Sint64 = 13 [default=-64];
|
||||||
|
optional Color F_Enum = 14 [default=GREEN];
|
||||||
|
|
||||||
|
// More fields with crazy defaults.
|
||||||
|
optional float F_Pinf = 15 [default=inf];
|
||||||
|
optional float F_Ninf = 16 [default=-inf];
|
||||||
|
optional float F_Nan = 17 [default=nan];
|
||||||
|
|
||||||
|
// Sub-message.
|
||||||
|
optional SubDefaults sub = 18;
|
||||||
|
|
||||||
|
// Redundant but explicit defaults.
|
||||||
|
optional string str_zero = 19 [default=""];
|
||||||
|
}
|
||||||
|
|
||||||
|
message SubDefaults {
|
||||||
|
optional int64 n = 1 [default=7];
|
||||||
|
}
|
||||||
|
|
||||||
|
message RepeatedEnum {
|
||||||
|
enum Color {
|
||||||
|
RED = 1;
|
||||||
|
}
|
||||||
|
repeated Color color = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message MoreRepeated {
|
||||||
|
repeated bool bools = 1;
|
||||||
|
repeated bool bools_packed = 2 [packed=true];
|
||||||
|
repeated int32 ints = 3;
|
||||||
|
repeated int32 ints_packed = 4 [packed=true];
|
||||||
|
repeated int64 int64s_packed = 7 [packed=true];
|
||||||
|
repeated string strings = 5;
|
||||||
|
repeated fixed32 fixeds = 6;
|
||||||
|
}
|
||||||
|
|
||||||
|
// GroupOld and GroupNew have the same wire format.
|
||||||
|
// GroupNew has a new field inside a group.
|
||||||
|
|
||||||
|
message GroupOld {
|
||||||
|
optional group G = 101 {
|
||||||
|
optional int32 x = 2;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
message GroupNew {
|
||||||
|
optional group G = 101 {
|
||||||
|
optional int32 x = 2;
|
||||||
|
optional int32 y = 3;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
message FloatingPoint {
|
||||||
|
required double f = 1;
|
||||||
|
optional bool exact = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message MessageWithMap {
|
||||||
|
map<int32, string> name_mapping = 1;
|
||||||
|
map<sint64, FloatingPoint> msg_mapping = 2;
|
||||||
|
map<bool, bytes> byte_mapping = 3;
|
||||||
|
map<string, string> str_to_str = 4;
|
||||||
|
}
|
||||||
|
|
||||||
|
message Oneof {
|
||||||
|
oneof union {
|
||||||
|
bool F_Bool = 1;
|
||||||
|
int32 F_Int32 = 2;
|
||||||
|
int64 F_Int64 = 3;
|
||||||
|
fixed32 F_Fixed32 = 4;
|
||||||
|
fixed64 F_Fixed64 = 5;
|
||||||
|
uint32 F_Uint32 = 6;
|
||||||
|
uint64 F_Uint64 = 7;
|
||||||
|
float F_Float = 8;
|
||||||
|
double F_Double = 9;
|
||||||
|
string F_String = 10;
|
||||||
|
bytes F_Bytes = 11;
|
||||||
|
sint32 F_Sint32 = 12;
|
||||||
|
sint64 F_Sint64 = 13;
|
||||||
|
MyMessage.Color F_Enum = 14;
|
||||||
|
GoTestField F_Message = 15;
|
||||||
|
group F_Group = 16 {
|
||||||
|
optional int32 x = 17;
|
||||||
|
}
|
||||||
|
int32 F_Largest_Tag = 536870911;
|
||||||
|
}
|
||||||
|
|
||||||
|
oneof tormato {
|
||||||
|
int32 value = 100;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
message Communique {
|
||||||
|
optional bool make_me_cry = 1;
|
||||||
|
|
||||||
|
// This is a oneof, called "union".
|
||||||
|
oneof union {
|
||||||
|
int32 number = 5;
|
||||||
|
string name = 6;
|
||||||
|
bytes data = 7;
|
||||||
|
double temp_c = 8;
|
||||||
|
MyMessage.Color col = 9;
|
||||||
|
Strings msg = 10;
|
||||||
|
}
|
||||||
|
}
|
854
vendor/src/github.com/golang/protobuf/proto/text.go
vendored
Normal file
854
vendor/src/github.com/golang/protobuf/proto/text.go
vendored
Normal file
|
@ -0,0 +1,854 @@
|
||||||
|
// Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
//
|
||||||
|
// Copyright 2010 The Go Authors. All rights reserved.
|
||||||
|
// https://github.com/golang/protobuf
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
package proto
|
||||||
|
|
||||||
|
// Functions for writing the text protocol buffer format.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"bytes"
|
||||||
|
"encoding"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"log"
|
||||||
|
"math"
|
||||||
|
"reflect"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
newline = []byte("\n")
|
||||||
|
spaces = []byte(" ")
|
||||||
|
gtNewline = []byte(">\n")
|
||||||
|
endBraceNewline = []byte("}\n")
|
||||||
|
backslashN = []byte{'\\', 'n'}
|
||||||
|
backslashR = []byte{'\\', 'r'}
|
||||||
|
backslashT = []byte{'\\', 't'}
|
||||||
|
backslashDQ = []byte{'\\', '"'}
|
||||||
|
backslashBS = []byte{'\\', '\\'}
|
||||||
|
posInf = []byte("inf")
|
||||||
|
negInf = []byte("-inf")
|
||||||
|
nan = []byte("nan")
|
||||||
|
)
|
||||||
|
|
||||||
|
type writer interface {
|
||||||
|
io.Writer
|
||||||
|
WriteByte(byte) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// textWriter is an io.Writer that tracks its indentation level.
|
||||||
|
type textWriter struct {
|
||||||
|
ind int
|
||||||
|
complete bool // if the current position is a complete line
|
||||||
|
compact bool // whether to write out as a one-liner
|
||||||
|
w writer
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *textWriter) WriteString(s string) (n int, err error) {
|
||||||
|
if !strings.Contains(s, "\n") {
|
||||||
|
if !w.compact && w.complete {
|
||||||
|
w.writeIndent()
|
||||||
|
}
|
||||||
|
w.complete = false
|
||||||
|
return io.WriteString(w.w, s)
|
||||||
|
}
|
||||||
|
// WriteString is typically called without newlines, so this
|
||||||
|
// codepath and its copy are rare. We copy to avoid
|
||||||
|
// duplicating all of Write's logic here.
|
||||||
|
return w.Write([]byte(s))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *textWriter) Write(p []byte) (n int, err error) {
|
||||||
|
newlines := bytes.Count(p, newline)
|
||||||
|
if newlines == 0 {
|
||||||
|
if !w.compact && w.complete {
|
||||||
|
w.writeIndent()
|
||||||
|
}
|
||||||
|
n, err = w.w.Write(p)
|
||||||
|
w.complete = false
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
|
||||||
|
frags := bytes.SplitN(p, newline, newlines+1)
|
||||||
|
if w.compact {
|
||||||
|
for i, frag := range frags {
|
||||||
|
if i > 0 {
|
||||||
|
if err := w.w.WriteByte(' '); err != nil {
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
n++
|
||||||
|
}
|
||||||
|
nn, err := w.w.Write(frag)
|
||||||
|
n += nn
|
||||||
|
if err != nil {
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, frag := range frags {
|
||||||
|
if w.complete {
|
||||||
|
w.writeIndent()
|
||||||
|
}
|
||||||
|
nn, err := w.w.Write(frag)
|
||||||
|
n += nn
|
||||||
|
if err != nil {
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
if i+1 < len(frags) {
|
||||||
|
if err := w.w.WriteByte('\n'); err != nil {
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
n++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
w.complete = len(frags[len(frags)-1]) == 0
|
||||||
|
return n, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *textWriter) WriteByte(c byte) error {
|
||||||
|
if w.compact && c == '\n' {
|
||||||
|
c = ' '
|
||||||
|
}
|
||||||
|
if !w.compact && w.complete {
|
||||||
|
w.writeIndent()
|
||||||
|
}
|
||||||
|
err := w.w.WriteByte(c)
|
||||||
|
w.complete = c == '\n'
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *textWriter) indent() { w.ind++ }
|
||||||
|
|
||||||
|
func (w *textWriter) unindent() {
|
||||||
|
if w.ind == 0 {
|
||||||
|
log.Print("proto: textWriter unindented too far")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
w.ind--
|
||||||
|
}
|
||||||
|
|
||||||
|
func writeName(w *textWriter, props *Properties) error {
|
||||||
|
if _, err := w.WriteString(props.OrigName); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if props.Wire != "group" {
|
||||||
|
return w.WriteByte(':')
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// raw is the interface satisfied by RawMessage.
|
||||||
|
type raw interface {
|
||||||
|
Bytes() []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
func requiresQuotes(u string) bool {
|
||||||
|
// When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
|
||||||
|
for _, ch := range u {
|
||||||
|
switch {
|
||||||
|
case ch == '.' || ch == '/' || ch == '_':
|
||||||
|
continue
|
||||||
|
case '0' <= ch && ch <= '9':
|
||||||
|
continue
|
||||||
|
case 'A' <= ch && ch <= 'Z':
|
||||||
|
continue
|
||||||
|
case 'a' <= ch && ch <= 'z':
|
||||||
|
continue
|
||||||
|
default:
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// isAny reports whether sv is a google.protobuf.Any message
|
||||||
|
func isAny(sv reflect.Value) bool {
|
||||||
|
type wkt interface {
|
||||||
|
XXX_WellKnownType() string
|
||||||
|
}
|
||||||
|
t, ok := sv.Addr().Interface().(wkt)
|
||||||
|
return ok && t.XXX_WellKnownType() == "Any"
|
||||||
|
}
|
||||||
|
|
||||||
|
// writeProto3Any writes an expanded google.protobuf.Any message.
|
||||||
|
//
|
||||||
|
// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
|
||||||
|
// required messages are not linked in).
|
||||||
|
//
|
||||||
|
// It returns (true, error) when sv was written in expanded format or an error
|
||||||
|
// was encountered.
|
||||||
|
func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
|
||||||
|
turl := sv.FieldByName("TypeUrl")
|
||||||
|
val := sv.FieldByName("Value")
|
||||||
|
if !turl.IsValid() || !val.IsValid() {
|
||||||
|
return true, errors.New("proto: invalid google.protobuf.Any message")
|
||||||
|
}
|
||||||
|
|
||||||
|
b, ok := val.Interface().([]byte)
|
||||||
|
if !ok {
|
||||||
|
return true, errors.New("proto: invalid google.protobuf.Any message")
|
||||||
|
}
|
||||||
|
|
||||||
|
parts := strings.Split(turl.String(), "/")
|
||||||
|
mt := MessageType(parts[len(parts)-1])
|
||||||
|
if mt == nil {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
m := reflect.New(mt.Elem())
|
||||||
|
if err := Unmarshal(b, m.Interface().(Message)); err != nil {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
w.Write([]byte("["))
|
||||||
|
u := turl.String()
|
||||||
|
if requiresQuotes(u) {
|
||||||
|
writeString(w, u)
|
||||||
|
} else {
|
||||||
|
w.Write([]byte(u))
|
||||||
|
}
|
||||||
|
if w.compact {
|
||||||
|
w.Write([]byte("]:<"))
|
||||||
|
} else {
|
||||||
|
w.Write([]byte("]: <\n"))
|
||||||
|
w.ind++
|
||||||
|
}
|
||||||
|
if err := tm.writeStruct(w, m.Elem()); err != nil {
|
||||||
|
return true, err
|
||||||
|
}
|
||||||
|
if w.compact {
|
||||||
|
w.Write([]byte("> "))
|
||||||
|
} else {
|
||||||
|
w.ind--
|
||||||
|
w.Write([]byte(">\n"))
|
||||||
|
}
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
|
||||||
|
if tm.ExpandAny && isAny(sv) {
|
||||||
|
if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
st := sv.Type()
|
||||||
|
sprops := GetProperties(st)
|
||||||
|
for i := 0; i < sv.NumField(); i++ {
|
||||||
|
fv := sv.Field(i)
|
||||||
|
props := sprops.Prop[i]
|
||||||
|
name := st.Field(i).Name
|
||||||
|
|
||||||
|
if strings.HasPrefix(name, "XXX_") {
|
||||||
|
// There are two XXX_ fields:
|
||||||
|
// XXX_unrecognized []byte
|
||||||
|
// XXX_extensions map[int32]proto.Extension
|
||||||
|
// The first is handled here;
|
||||||
|
// the second is handled at the bottom of this function.
|
||||||
|
if name == "XXX_unrecognized" && !fv.IsNil() {
|
||||||
|
if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if fv.Kind() == reflect.Ptr && fv.IsNil() {
|
||||||
|
// Field not filled in. This could be an optional field or
|
||||||
|
// a required field that wasn't filled in. Either way, there
|
||||||
|
// isn't anything we can show for it.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if fv.Kind() == reflect.Slice && fv.IsNil() {
|
||||||
|
// Repeated field that is empty, or a bytes field that is unused.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if props.Repeated && fv.Kind() == reflect.Slice {
|
||||||
|
// Repeated field.
|
||||||
|
for j := 0; j < fv.Len(); j++ {
|
||||||
|
if err := writeName(w, props); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !w.compact {
|
||||||
|
if err := w.WriteByte(' '); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
v := fv.Index(j)
|
||||||
|
if v.Kind() == reflect.Ptr && v.IsNil() {
|
||||||
|
// A nil message in a repeated field is not valid,
|
||||||
|
// but we can handle that more gracefully than panicking.
|
||||||
|
if _, err := w.Write([]byte("<nil>\n")); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if err := tm.writeAny(w, v, props); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := w.WriteByte('\n'); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if fv.Kind() == reflect.Map {
|
||||||
|
// Map fields are rendered as a repeated struct with key/value fields.
|
||||||
|
keys := fv.MapKeys()
|
||||||
|
sort.Sort(mapKeys(keys))
|
||||||
|
for _, key := range keys {
|
||||||
|
val := fv.MapIndex(key)
|
||||||
|
if err := writeName(w, props); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !w.compact {
|
||||||
|
if err := w.WriteByte(' '); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// open struct
|
||||||
|
if err := w.WriteByte('<'); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !w.compact {
|
||||||
|
if err := w.WriteByte('\n'); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
w.indent()
|
||||||
|
// key
|
||||||
|
if _, err := w.WriteString("key:"); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !w.compact {
|
||||||
|
if err := w.WriteByte(' '); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := tm.writeAny(w, key, props.mkeyprop); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := w.WriteByte('\n'); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// nil values aren't legal, but we can avoid panicking because of them.
|
||||||
|
if val.Kind() != reflect.Ptr || !val.IsNil() {
|
||||||
|
// value
|
||||||
|
if _, err := w.WriteString("value:"); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !w.compact {
|
||||||
|
if err := w.WriteByte(' '); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := tm.writeAny(w, val, props.mvalprop); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := w.WriteByte('\n'); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// close struct
|
||||||
|
w.unindent()
|
||||||
|
if err := w.WriteByte('>'); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := w.WriteByte('\n'); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
|
||||||
|
// empty bytes field
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
|
||||||
|
// proto3 non-repeated scalar field; skip if zero value
|
||||||
|
if isProto3Zero(fv) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if fv.Kind() == reflect.Interface {
|
||||||
|
// Check if it is a oneof.
|
||||||
|
if st.Field(i).Tag.Get("protobuf_oneof") != "" {
|
||||||
|
// fv is nil, or holds a pointer to generated struct.
|
||||||
|
// That generated struct has exactly one field,
|
||||||
|
// which has a protobuf struct tag.
|
||||||
|
if fv.IsNil() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
inner := fv.Elem().Elem() // interface -> *T -> T
|
||||||
|
tag := inner.Type().Field(0).Tag.Get("protobuf")
|
||||||
|
props = new(Properties) // Overwrite the outer props var, but not its pointee.
|
||||||
|
props.Parse(tag)
|
||||||
|
// Write the value in the oneof, not the oneof itself.
|
||||||
|
fv = inner.Field(0)
|
||||||
|
|
||||||
|
// Special case to cope with malformed messages gracefully:
|
||||||
|
// If the value in the oneof is a nil pointer, don't panic
|
||||||
|
// in writeAny.
|
||||||
|
if fv.Kind() == reflect.Ptr && fv.IsNil() {
|
||||||
|
// Use errors.New so writeAny won't render quotes.
|
||||||
|
msg := errors.New("/* nil */")
|
||||||
|
fv = reflect.ValueOf(&msg).Elem()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := writeName(w, props); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !w.compact {
|
||||||
|
if err := w.WriteByte(' '); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if b, ok := fv.Interface().(raw); ok {
|
||||||
|
if err := writeRaw(w, b.Bytes()); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Enums have a String method, so writeAny will work fine.
|
||||||
|
if err := tm.writeAny(w, fv, props); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := w.WriteByte('\n'); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extensions (the XXX_extensions field).
|
||||||
|
pv := sv.Addr()
|
||||||
|
if _, ok := extendable(pv.Interface()); ok {
|
||||||
|
if err := tm.writeExtensions(w, pv); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// writeRaw writes an uninterpreted raw message.
|
||||||
|
func writeRaw(w *textWriter, b []byte) error {
|
||||||
|
if err := w.WriteByte('<'); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !w.compact {
|
||||||
|
if err := w.WriteByte('\n'); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
w.indent()
|
||||||
|
if err := writeUnknownStruct(w, b); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
w.unindent()
|
||||||
|
if err := w.WriteByte('>'); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// writeAny writes an arbitrary field.
|
||||||
|
func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
|
||||||
|
v = reflect.Indirect(v)
|
||||||
|
|
||||||
|
// Floats have special cases.
|
||||||
|
if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
|
||||||
|
x := v.Float()
|
||||||
|
var b []byte
|
||||||
|
switch {
|
||||||
|
case math.IsInf(x, 1):
|
||||||
|
b = posInf
|
||||||
|
case math.IsInf(x, -1):
|
||||||
|
b = negInf
|
||||||
|
case math.IsNaN(x):
|
||||||
|
b = nan
|
||||||
|
}
|
||||||
|
if b != nil {
|
||||||
|
_, err := w.Write(b)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// Other values are handled below.
|
||||||
|
}
|
||||||
|
|
||||||
|
// We don't attempt to serialise every possible value type; only those
|
||||||
|
// that can occur in protocol buffers.
|
||||||
|
switch v.Kind() {
|
||||||
|
case reflect.Slice:
|
||||||
|
// Should only be a []byte; repeated fields are handled in writeStruct.
|
||||||
|
if err := writeString(w, string(v.Bytes())); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
case reflect.String:
|
||||||
|
if err := writeString(w, v.String()); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
case reflect.Struct:
|
||||||
|
// Required/optional group/message.
|
||||||
|
var bra, ket byte = '<', '>'
|
||||||
|
if props != nil && props.Wire == "group" {
|
||||||
|
bra, ket = '{', '}'
|
||||||
|
}
|
||||||
|
if err := w.WriteByte(bra); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !w.compact {
|
||||||
|
if err := w.WriteByte('\n'); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
w.indent()
|
||||||
|
if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
|
||||||
|
text, err := etm.MarshalText()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err = w.Write(text); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else if err := tm.writeStruct(w, v); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
w.unindent()
|
||||||
|
if err := w.WriteByte(ket); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
_, err := fmt.Fprint(w, v.Interface())
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// equivalent to C's isprint.
|
||||||
|
func isprint(c byte) bool {
|
||||||
|
return c >= 0x20 && c < 0x7f
|
||||||
|
}
|
||||||
|
|
||||||
|
// writeString writes a string in the protocol buffer text format.
|
||||||
|
// It is similar to strconv.Quote except we don't use Go escape sequences,
|
||||||
|
// we treat the string as a byte sequence, and we use octal escapes.
|
||||||
|
// These differences are to maintain interoperability with the other
|
||||||
|
// languages' implementations of the text format.
|
||||||
|
func writeString(w *textWriter, s string) error {
|
||||||
|
// use WriteByte here to get any needed indent
|
||||||
|
if err := w.WriteByte('"'); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// Loop over the bytes, not the runes.
|
||||||
|
for i := 0; i < len(s); i++ {
|
||||||
|
var err error
|
||||||
|
// Divergence from C++: we don't escape apostrophes.
|
||||||
|
// There's no need to escape them, and the C++ parser
|
||||||
|
// copes with a naked apostrophe.
|
||||||
|
switch c := s[i]; c {
|
||||||
|
case '\n':
|
||||||
|
_, err = w.w.Write(backslashN)
|
||||||
|
case '\r':
|
||||||
|
_, err = w.w.Write(backslashR)
|
||||||
|
case '\t':
|
||||||
|
_, err = w.w.Write(backslashT)
|
||||||
|
case '"':
|
||||||
|
_, err = w.w.Write(backslashDQ)
|
||||||
|
case '\\':
|
||||||
|
_, err = w.w.Write(backslashBS)
|
||||||
|
default:
|
||||||
|
if isprint(c) {
|
||||||
|
err = w.w.WriteByte(c)
|
||||||
|
} else {
|
||||||
|
_, err = fmt.Fprintf(w.w, "\\%03o", c)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return w.WriteByte('"')
|
||||||
|
}
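// Illustrative note (added to this vendored copy, not upstream): for the input
// "a\nb\x01" the bytes written are `"a\nb\001"` — printable bytes pass through
// unchanged, the escapes handled above use their backslash forms, and every
// other byte becomes a three-digit octal escape.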
|
||||||
|
|
||||||
|
func writeUnknownStruct(w *textWriter, data []byte) (err error) {
|
||||||
|
if !w.compact {
|
||||||
|
if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
b := NewBuffer(data)
|
||||||
|
for b.index < len(b.buf) {
|
||||||
|
x, err := b.DecodeVarint()
|
||||||
|
if err != nil {
|
||||||
|
_, err := fmt.Fprintf(w, "/* %v */\n", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
wire, tag := x&7, x>>3
|
||||||
|
if wire == WireEndGroup {
|
||||||
|
w.unindent()
|
||||||
|
if _, err := w.Write(endBraceNewline); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if _, err := fmt.Fprint(w, tag); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if wire != WireStartGroup {
|
||||||
|
if err := w.WriteByte(':'); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !w.compact || wire == WireStartGroup {
|
||||||
|
if err := w.WriteByte(' '); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
switch wire {
|
||||||
|
case WireBytes:
|
||||||
|
buf, e := b.DecodeRawBytes(false)
|
||||||
|
if e == nil {
|
||||||
|
_, err = fmt.Fprintf(w, "%q", buf)
|
||||||
|
} else {
|
||||||
|
_, err = fmt.Fprintf(w, "/* %v */", e)
|
||||||
|
}
|
||||||
|
case WireFixed32:
|
||||||
|
x, err = b.DecodeFixed32()
|
||||||
|
err = writeUnknownInt(w, x, err)
|
||||||
|
case WireFixed64:
|
||||||
|
x, err = b.DecodeFixed64()
|
||||||
|
err = writeUnknownInt(w, x, err)
|
||||||
|
case WireStartGroup:
|
||||||
|
err = w.WriteByte('{')
|
||||||
|
w.indent()
|
||||||
|
case WireVarint:
|
||||||
|
x, err = b.DecodeVarint()
|
||||||
|
err = writeUnknownInt(w, x, err)
|
||||||
|
default:
|
||||||
|
_, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err = w.WriteByte('\n'); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func writeUnknownInt(w *textWriter, x uint64, err error) error {
|
||||||
|
if err == nil {
|
||||||
|
_, err = fmt.Fprint(w, x)
|
||||||
|
} else {
|
||||||
|
_, err = fmt.Fprintf(w, "/* %v */", err)
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
type int32Slice []int32
|
||||||
|
|
||||||
|
func (s int32Slice) Len() int { return len(s) }
|
||||||
|
func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
|
||||||
|
func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||||
|
|
||||||
|
// writeExtensions writes all the extensions in pv.
|
||||||
|
// pv is assumed to be a pointer to a protocol message struct that is extendable.
|
||||||
|
func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
|
||||||
|
emap := extensionMaps[pv.Type().Elem()]
|
||||||
|
ep, _ := extendable(pv.Interface())
|
||||||
|
|
||||||
|
// Order the extensions by ID.
|
||||||
|
// This isn't strictly necessary, but it will give us
|
||||||
|
// canonical output, which will also make testing easier.
|
||||||
|
m, mu := ep.extensionsRead()
|
||||||
|
if m == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
mu.Lock()
|
||||||
|
ids := make([]int32, 0, len(m))
|
||||||
|
for id := range m {
|
||||||
|
ids = append(ids, id)
|
||||||
|
}
|
||||||
|
sort.Sort(int32Slice(ids))
|
||||||
|
mu.Unlock()
|
||||||
|
|
||||||
|
for _, extNum := range ids {
|
||||||
|
ext := m[extNum]
|
||||||
|
var desc *ExtensionDesc
|
||||||
|
if emap != nil {
|
||||||
|
desc = emap[extNum]
|
||||||
|
}
|
||||||
|
if desc == nil {
|
||||||
|
// Unknown extension.
|
||||||
|
if err := writeUnknownStruct(w, ext.enc); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
pb, err := GetExtension(ep, desc)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed getting extension: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Repeated extensions will appear as a slice.
|
||||||
|
if !desc.repeated() {
|
||||||
|
if err := tm.writeExtension(w, desc.Name, pb); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
v := reflect.ValueOf(pb)
|
||||||
|
for i := 0; i < v.Len(); i++ {
|
||||||
|
if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
|
||||||
|
if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !w.compact {
|
||||||
|
if err := w.WriteByte(' '); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := w.WriteByte('\n'); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *textWriter) writeIndent() {
|
||||||
|
if !w.complete {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
remain := w.ind * 2
|
||||||
|
for remain > 0 {
|
||||||
|
n := remain
|
||||||
|
if n > len(spaces) {
|
||||||
|
n = len(spaces)
|
||||||
|
}
|
||||||
|
w.w.Write(spaces[:n])
|
||||||
|
remain -= n
|
||||||
|
}
|
||||||
|
w.complete = false
|
||||||
|
}
|
||||||
|
|
||||||
|
// TextMarshaler is a configurable text format marshaler.
|
||||||
|
type TextMarshaler struct {
|
||||||
|
Compact bool // use compact text format (one line).
|
||||||
|
ExpandAny bool // expand google.protobuf.Any messages of known types
|
||||||
|
}
|
||||||
|
|
||||||
|
// Marshal writes a given protocol buffer in text format.
|
||||||
|
// The only errors returned are from w.
|
||||||
|
func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
|
||||||
|
val := reflect.ValueOf(pb)
|
||||||
|
if pb == nil || val.IsNil() {
|
||||||
|
w.Write([]byte("<nil>"))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
var bw *bufio.Writer
|
||||||
|
ww, ok := w.(writer)
|
||||||
|
if !ok {
|
||||||
|
bw = bufio.NewWriter(w)
|
||||||
|
ww = bw
|
||||||
|
}
|
||||||
|
aw := &textWriter{
|
||||||
|
w: ww,
|
||||||
|
complete: true,
|
||||||
|
compact: tm.Compact,
|
||||||
|
}
|
||||||
|
|
||||||
|
if etm, ok := pb.(encoding.TextMarshaler); ok {
|
||||||
|
text, err := etm.MarshalText()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err = aw.Write(text); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if bw != nil {
|
||||||
|
return bw.Flush()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// Dereference the received pointer so we don't have outer < and >.
|
||||||
|
v := reflect.Indirect(val)
|
||||||
|
if err := tm.writeStruct(aw, v); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if bw != nil {
|
||||||
|
return bw.Flush()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Text is the same as Marshal, but returns the string directly.
|
||||||
|
func (tm *TextMarshaler) Text(pb Message) string {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
tm.Marshal(&buf, pb)
|
||||||
|
return buf.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
defaultTextMarshaler = TextMarshaler{}
|
||||||
|
compactTextMarshaler = TextMarshaler{Compact: true}
|
||||||
|
)
|
||||||
|
|
||||||
|
// TODO: consider removing some of the Marshal functions below.
|
||||||
|
|
||||||
|
// MarshalText writes a given protocol buffer in text format.
|
||||||
|
// The only errors returned are from w.
|
||||||
|
func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
|
||||||
|
|
||||||
|
// MarshalTextString is the same as MarshalText, but returns the string directly.
|
||||||
|
func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
|
||||||
|
|
||||||
|
// CompactText writes a given protocol buffer in compact text format (one line).
|
||||||
|
func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
|
||||||
|
|
||||||
|
// CompactTextString is the same as CompactText, but returns the string directly.
|
||||||
|
func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
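// exampleTextFormat is an illustrative sketch added to this vendored copy (it
// is not part of upstream golang/protobuf); the function name is made up. It
// shows how the exported helpers above fit together: MarshalText is shorthand
// for defaultTextMarshaler.Marshal, and CompactTextString renders the same
// message on a single line. The Message value is assumed to come from
// generated code.
func exampleTextFormat(m Message) (pretty, compact string) {
	var buf bytes.Buffer
	if err := MarshalText(&buf, m); err != nil {
		// The only errors returned come from the underlying writer, so a
		// bytes.Buffer should not fail in practice.
		return "", ""
	}
	pretty = buf.String()
	compact = CompactTextString(m)
	return pretty, compact
}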
|
895
vendor/src/github.com/golang/protobuf/proto/text_parser.go
vendored
Normal file
|
@ -0,0 +1,895 @@
|
||||||
|
// Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
//
|
||||||
|
// Copyright 2010 The Go Authors. All rights reserved.
|
||||||
|
// https://github.com/golang/protobuf
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
package proto
|
||||||
|
|
||||||
|
// Functions for parsing the Text protocol buffer format.
|
||||||
|
// TODO: message sets.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"unicode/utf8"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Error string emitted when deserializing Any and fields are already set
|
||||||
|
const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
|
||||||
|
|
||||||
|
type ParseError struct {
|
||||||
|
Message string
|
||||||
|
Line int // 1-based line number
|
||||||
|
Offset int // 0-based byte offset from start of input
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *ParseError) Error() string {
|
||||||
|
if p.Line == 1 {
|
||||||
|
// show offset only for first line
|
||||||
|
return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("line %d: %v", p.Line, p.Message)
|
||||||
|
}
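// Illustrative note (added to this vendored copy, not upstream): an error at
// byte offset 15 of the first line renders as "line 1.15: <message>"; errors
// on later lines show only the line number, e.g. "line 3: <message>".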
|
||||||
|
|
||||||
|
type token struct {
|
||||||
|
value string
|
||||||
|
err *ParseError
|
||||||
|
line int // line number
|
||||||
|
offset int // byte number from start of input, not start of line
|
||||||
|
unquoted string // the unquoted version of value, if it was a quoted string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *token) String() string {
|
||||||
|
if t.err == nil {
|
||||||
|
return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("parse error: %v", t.err)
|
||||||
|
}
|
||||||
|
|
||||||
|
type textParser struct {
|
||||||
|
s string // remaining input
|
||||||
|
done bool // whether the parsing is finished (success or error)
|
||||||
|
backed bool // whether back() was called
|
||||||
|
offset, line int
|
||||||
|
cur token
|
||||||
|
}
|
||||||
|
|
||||||
|
func newTextParser(s string) *textParser {
|
||||||
|
p := new(textParser)
|
||||||
|
p.s = s
|
||||||
|
p.line = 1
|
||||||
|
p.cur.line = 1
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
|
||||||
|
pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
|
||||||
|
p.cur.err = pe
|
||||||
|
p.done = true
|
||||||
|
return pe
|
||||||
|
}
|
||||||
|
|
||||||
|
// Numbers and identifiers are matched by [-+._A-Za-z0-9]
|
||||||
|
func isIdentOrNumberChar(c byte) bool {
|
||||||
|
switch {
|
||||||
|
case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
|
||||||
|
return true
|
||||||
|
case '0' <= c && c <= '9':
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
switch c {
|
||||||
|
case '-', '+', '.', '_':
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func isWhitespace(c byte) bool {
|
||||||
|
switch c {
|
||||||
|
case ' ', '\t', '\n', '\r':
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func isQuote(c byte) bool {
|
||||||
|
switch c {
|
||||||
|
case '"', '\'':
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *textParser) skipWhitespace() {
|
||||||
|
i := 0
|
||||||
|
for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
|
||||||
|
if p.s[i] == '#' {
|
||||||
|
// comment; skip to end of line or input
|
||||||
|
for i < len(p.s) && p.s[i] != '\n' {
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
if i == len(p.s) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if p.s[i] == '\n' {
|
||||||
|
p.line++
|
||||||
|
}
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
p.offset += i
|
||||||
|
p.s = p.s[i:len(p.s)]
|
||||||
|
if len(p.s) == 0 {
|
||||||
|
p.done = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *textParser) advance() {
|
||||||
|
// Skip whitespace
|
||||||
|
p.skipWhitespace()
|
||||||
|
if p.done {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start of non-whitespace
|
||||||
|
p.cur.err = nil
|
||||||
|
p.cur.offset, p.cur.line = p.offset, p.line
|
||||||
|
p.cur.unquoted = ""
|
||||||
|
switch p.s[0] {
|
||||||
|
case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
|
||||||
|
// Single symbol
|
||||||
|
p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
|
||||||
|
case '"', '\'':
|
||||||
|
// Quoted string
|
||||||
|
i := 1
|
||||||
|
for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
|
||||||
|
if p.s[i] == '\\' && i+1 < len(p.s) {
|
||||||
|
// skip escaped char
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
if i >= len(p.s) || p.s[i] != p.s[0] {
|
||||||
|
p.errorf("unmatched quote")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
|
||||||
|
if err != nil {
|
||||||
|
p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
|
||||||
|
p.cur.unquoted = unq
|
||||||
|
default:
|
||||||
|
i := 0
|
||||||
|
for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
if i == 0 {
|
||||||
|
p.errorf("unexpected byte %#x", p.s[0])
|
||||||
|
return
|
||||||
|
}
|
||||||
|
p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
|
||||||
|
}
|
||||||
|
p.offset += len(p.cur.value)
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
errBadUTF8 = errors.New("proto: bad UTF-8")
|
||||||
|
errBadHex = errors.New("proto: bad hexadecimal")
|
||||||
|
)
|
||||||
|
|
||||||
|
func unquoteC(s string, quote rune) (string, error) {
|
||||||
|
// This is based on C++'s tokenizer.cc.
|
||||||
|
// Despite its name, this is *not* parsing C syntax.
|
||||||
|
// For instance, "\0" is an invalid quoted string.
|
||||||
|
|
||||||
|
// Avoid allocation in trivial cases.
|
||||||
|
simple := true
|
||||||
|
for _, r := range s {
|
||||||
|
if r == '\\' || r == quote {
|
||||||
|
simple = false
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if simple {
|
||||||
|
return s, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
buf := make([]byte, 0, 3*len(s)/2)
|
||||||
|
for len(s) > 0 {
|
||||||
|
r, n := utf8.DecodeRuneInString(s)
|
||||||
|
if r == utf8.RuneError && n == 1 {
|
||||||
|
return "", errBadUTF8
|
||||||
|
}
|
||||||
|
s = s[n:]
|
||||||
|
if r != '\\' {
|
||||||
|
if r < utf8.RuneSelf {
|
||||||
|
buf = append(buf, byte(r))
|
||||||
|
} else {
|
||||||
|
buf = append(buf, string(r)...)
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
ch, tail, err := unescape(s)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
buf = append(buf, ch...)
|
||||||
|
s = tail
|
||||||
|
}
|
||||||
|
return string(buf), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func unescape(s string) (ch string, tail string, err error) {
|
||||||
|
r, n := utf8.DecodeRuneInString(s)
|
||||||
|
if r == utf8.RuneError && n == 1 {
|
||||||
|
return "", "", errBadUTF8
|
||||||
|
}
|
||||||
|
s = s[n:]
|
||||||
|
switch r {
|
||||||
|
case 'a':
|
||||||
|
return "\a", s, nil
|
||||||
|
case 'b':
|
||||||
|
return "\b", s, nil
|
||||||
|
case 'f':
|
||||||
|
return "\f", s, nil
|
||||||
|
case 'n':
|
||||||
|
return "\n", s, nil
|
||||||
|
case 'r':
|
||||||
|
return "\r", s, nil
|
||||||
|
case 't':
|
||||||
|
return "\t", s, nil
|
||||||
|
case 'v':
|
||||||
|
return "\v", s, nil
|
||||||
|
case '?':
|
||||||
|
return "?", s, nil // trigraph workaround
|
||||||
|
case '\'', '"', '\\':
|
||||||
|
return string(r), s, nil
|
||||||
|
case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':
|
||||||
|
if len(s) < 2 {
|
||||||
|
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
|
||||||
|
}
|
||||||
|
base := 8
|
||||||
|
ss := s[:2]
|
||||||
|
s = s[2:]
|
||||||
|
if r == 'x' || r == 'X' {
|
||||||
|
base = 16
|
||||||
|
} else {
|
||||||
|
ss = string(r) + ss
|
||||||
|
}
|
||||||
|
i, err := strconv.ParseUint(ss, base, 8)
|
||||||
|
if err != nil {
|
||||||
|
return "", "", err
|
||||||
|
}
|
||||||
|
return string([]byte{byte(i)}), s, nil
|
||||||
|
case 'u', 'U':
|
||||||
|
n := 4
|
||||||
|
if r == 'U' {
|
||||||
|
n = 8
|
||||||
|
}
|
||||||
|
if len(s) < n {
|
||||||
|
return "", "", fmt.Errorf(`\%c requires %d digits`, r, n)
|
||||||
|
}
|
||||||
|
|
||||||
|
bs := make([]byte, n/2)
|
||||||
|
for i := 0; i < n; i += 2 {
|
||||||
|
a, ok1 := unhex(s[i])
|
||||||
|
b, ok2 := unhex(s[i+1])
|
||||||
|
if !ok1 || !ok2 {
|
||||||
|
return "", "", errBadHex
|
||||||
|
}
|
||||||
|
bs[i/2] = a<<4 | b
|
||||||
|
}
|
||||||
|
s = s[n:]
|
||||||
|
return string(bs), s, nil
|
||||||
|
}
|
||||||
|
return "", "", fmt.Errorf(`unknown escape \%c`, r)
|
||||||
|
}
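// Illustrative note (added to this vendored copy, not upstream): unescape is
// handed the text immediately after a backslash. For example "n rest" yields
// ("\n", " rest"), the octal form "101abc" yields ("A", "abc"), and the hex
// form "x41abc" also yields ("A", "abc").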
|
||||||
|
|
||||||
|
// Adapted from src/pkg/strconv/quote.go.
|
||||||
|
func unhex(b byte) (v byte, ok bool) {
|
||||||
|
switch {
|
||||||
|
case '0' <= b && b <= '9':
|
||||||
|
return b - '0', true
|
||||||
|
case 'a' <= b && b <= 'f':
|
||||||
|
return b - 'a' + 10, true
|
||||||
|
case 'A' <= b && b <= 'F':
|
||||||
|
return b - 'A' + 10, true
|
||||||
|
}
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Back off the parser by one token. Can only be done between calls to next().
|
||||||
|
// It makes the next advance() a no-op.
|
||||||
|
func (p *textParser) back() { p.backed = true }
|
||||||
|
|
||||||
|
// Advances the parser and returns the new current token.
|
||||||
|
func (p *textParser) next() *token {
|
||||||
|
if p.backed || p.done {
|
||||||
|
p.backed = false
|
||||||
|
return &p.cur
|
||||||
|
}
|
||||||
|
p.advance()
|
||||||
|
if p.done {
|
||||||
|
p.cur.value = ""
|
||||||
|
} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
|
||||||
|
// Look for multiple quoted strings separated by whitespace,
|
||||||
|
// and concatenate them.
|
||||||
|
cat := p.cur
|
||||||
|
for {
|
||||||
|
p.skipWhitespace()
|
||||||
|
if p.done || !isQuote(p.s[0]) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
p.advance()
|
||||||
|
if p.cur.err != nil {
|
||||||
|
return &p.cur
|
||||||
|
}
|
||||||
|
cat.value += " " + p.cur.value
|
||||||
|
cat.unquoted += p.cur.unquoted
|
||||||
|
}
|
||||||
|
p.done = false // parser may have seen EOF, but we want to return cat
|
||||||
|
p.cur = cat
|
||||||
|
}
|
||||||
|
return &p.cur
|
||||||
|
}
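// Illustrative note (added to this vendored copy, not upstream): because next
// concatenates adjacent quoted strings, the input
//	name: "My name is "
//	"elsewhere"
// is returned as a single token whose unquoted value is "My name is elsewhere".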
|
||||||
|
|
||||||
|
func (p *textParser) consumeToken(s string) error {
|
||||||
|
tok := p.next()
|
||||||
|
if tok.err != nil {
|
||||||
|
return tok.err
|
||||||
|
}
|
||||||
|
if tok.value != s {
|
||||||
|
p.back()
|
||||||
|
return p.errorf("expected %q, found %q", s, tok.value)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return a RequiredNotSetError indicating which required field was not set.
|
||||||
|
func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
|
||||||
|
st := sv.Type()
|
||||||
|
sprops := GetProperties(st)
|
||||||
|
for i := 0; i < st.NumField(); i++ {
|
||||||
|
if !isNil(sv.Field(i)) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
props := sprops.Prop[i]
|
||||||
|
if props.Required {
|
||||||
|
return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns the index in the struct for the named field, as well as the parsed tag properties.
|
||||||
|
func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
|
||||||
|
i, ok := sprops.decoderOrigNames[name]
|
||||||
|
if ok {
|
||||||
|
return i, sprops.Prop[i], true
|
||||||
|
}
|
||||||
|
return -1, nil, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Consume a ':' from the input stream (if the next token is a colon),
|
||||||
|
// returning an error if a colon is needed but not present.
|
||||||
|
func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
|
||||||
|
tok := p.next()
|
||||||
|
if tok.err != nil {
|
||||||
|
return tok.err
|
||||||
|
}
|
||||||
|
if tok.value != ":" {
|
||||||
|
// Colon is optional when the field is a group or message.
|
||||||
|
needColon := true
|
||||||
|
switch props.Wire {
|
||||||
|
case "group":
|
||||||
|
needColon = false
|
||||||
|
case "bytes":
|
||||||
|
// A "bytes" field is either a message, a string, or a repeated field;
|
||||||
|
// those three become *T, *string and []T respectively, so we can check for
|
||||||
|
// this field being a pointer to a non-string.
|
||||||
|
if typ.Kind() == reflect.Ptr {
|
||||||
|
// *T or *string
|
||||||
|
if typ.Elem().Kind() == reflect.String {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
} else if typ.Kind() == reflect.Slice {
|
||||||
|
// []T or []*T
|
||||||
|
if typ.Elem().Kind() != reflect.Ptr {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
} else if typ.Kind() == reflect.String {
|
||||||
|
// The proto3 exception is for a string field,
|
||||||
|
// which requires a colon.
|
||||||
|
break
|
||||||
|
}
|
||||||
|
needColon = false
|
||||||
|
}
|
||||||
|
if needColon {
|
||||||
|
return p.errorf("expected ':', found %q", tok.value)
|
||||||
|
}
|
||||||
|
p.back()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
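// Illustrative note (added to this vendored copy, not upstream): the colon is
// therefore optional before a message or group, so `inner < host: "x" >` and
// `inner: < host: "x" >` parse identically, while a scalar such as `count 42`
// is rejected with `expected ':', found "42"`.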
|
||||||
|
|
||||||
|
func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
||||||
|
st := sv.Type()
|
||||||
|
sprops := GetProperties(st)
|
||||||
|
reqCount := sprops.reqCount
|
||||||
|
var reqFieldErr error
|
||||||
|
fieldSet := make(map[string]bool)
|
||||||
|
// A struct is a sequence of "name: value", terminated by one of
|
||||||
|
// '>' or '}', or the end of the input. A name may also be
|
||||||
|
// "[extension]" or "[type/url]".
|
||||||
|
//
|
||||||
|
// The whole struct can also be an expanded Any message, like:
|
||||||
|
// [type/url] < ... struct contents ... >
|
||||||
|
for {
|
||||||
|
tok := p.next()
|
||||||
|
if tok.err != nil {
|
||||||
|
return tok.err
|
||||||
|
}
|
||||||
|
if tok.value == terminator {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if tok.value == "[" {
|
||||||
|
// Looks like an extension or an Any.
|
||||||
|
//
|
||||||
|
// TODO: Check whether we need to handle
|
||||||
|
// namespace rooted names (e.g. ".something.Foo").
|
||||||
|
extName, err := p.consumeExtName()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if s := strings.LastIndex(extName, "/"); s >= 0 {
|
||||||
|
// If it contains a slash, it's an Any type URL.
|
||||||
|
messageName := extName[s+1:]
|
||||||
|
mt := MessageType(messageName)
|
||||||
|
if mt == nil {
|
||||||
|
return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
|
||||||
|
}
|
||||||
|
tok = p.next()
|
||||||
|
if tok.err != nil {
|
||||||
|
return tok.err
|
||||||
|
}
|
||||||
|
// consume an optional colon
|
||||||
|
if tok.value == ":" {
|
||||||
|
tok = p.next()
|
||||||
|
if tok.err != nil {
|
||||||
|
return tok.err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
var terminator string
|
||||||
|
switch tok.value {
|
||||||
|
case "<":
|
||||||
|
terminator = ">"
|
||||||
|
case "{":
|
||||||
|
terminator = "}"
|
||||||
|
default:
|
||||||
|
return p.errorf("expected '{' or '<', found %q", tok.value)
|
||||||
|
}
|
||||||
|
v := reflect.New(mt.Elem())
|
||||||
|
if pe := p.readStruct(v.Elem(), terminator); pe != nil {
|
||||||
|
return pe
|
||||||
|
}
|
||||||
|
b, err := Marshal(v.Interface().(Message))
|
||||||
|
if err != nil {
|
||||||
|
return p.errorf("failed to marshal message of type %q: %v", messageName, err)
|
||||||
|
}
|
||||||
|
if fieldSet["type_url"] {
|
||||||
|
return p.errorf(anyRepeatedlyUnpacked, "type_url")
|
||||||
|
}
|
||||||
|
if fieldSet["value"] {
|
||||||
|
return p.errorf(anyRepeatedlyUnpacked, "value")
|
||||||
|
}
|
||||||
|
sv.FieldByName("TypeUrl").SetString(extName)
|
||||||
|
sv.FieldByName("Value").SetBytes(b)
|
||||||
|
fieldSet["type_url"] = true
|
||||||
|
fieldSet["value"] = true
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
var desc *ExtensionDesc
|
||||||
|
// This could be faster, but it's functional.
|
||||||
|
// TODO: Do something smarter than a linear scan.
|
||||||
|
for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
|
||||||
|
if d.Name == extName {
|
||||||
|
desc = d
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if desc == nil {
|
||||||
|
return p.errorf("unrecognized extension %q", extName)
|
||||||
|
}
|
||||||
|
|
||||||
|
props := &Properties{}
|
||||||
|
props.Parse(desc.Tag)
|
||||||
|
|
||||||
|
typ := reflect.TypeOf(desc.ExtensionType)
|
||||||
|
if err := p.checkForColon(props, typ); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
rep := desc.repeated()
|
||||||
|
|
||||||
|
// Read the extension structure, and set it in
|
||||||
|
// the value we're constructing.
|
||||||
|
var ext reflect.Value
|
||||||
|
if !rep {
|
||||||
|
ext = reflect.New(typ).Elem()
|
||||||
|
} else {
|
||||||
|
ext = reflect.New(typ.Elem()).Elem()
|
||||||
|
}
|
||||||
|
if err := p.readAny(ext, props); err != nil {
|
||||||
|
if _, ok := err.(*RequiredNotSetError); !ok {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
reqFieldErr = err
|
||||||
|
}
|
||||||
|
ep := sv.Addr().Interface().(Message)
|
||||||
|
if !rep {
|
||||||
|
SetExtension(ep, desc, ext.Interface())
|
||||||
|
} else {
|
||||||
|
old, err := GetExtension(ep, desc)
|
||||||
|
var sl reflect.Value
|
||||||
|
if err == nil {
|
||||||
|
sl = reflect.ValueOf(old) // existing slice
|
||||||
|
} else {
|
||||||
|
sl = reflect.MakeSlice(typ, 0, 1)
|
||||||
|
}
|
||||||
|
sl = reflect.Append(sl, ext)
|
||||||
|
SetExtension(ep, desc, sl.Interface())
|
||||||
|
}
|
||||||
|
if err := p.consumeOptionalSeparator(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// This is a normal, non-extension field.
|
||||||
|
name := tok.value
|
||||||
|
var dst reflect.Value
|
||||||
|
fi, props, ok := structFieldByName(sprops, name)
|
||||||
|
if ok {
|
||||||
|
dst = sv.Field(fi)
|
||||||
|
} else if oop, ok := sprops.OneofTypes[name]; ok {
|
||||||
|
// It is a oneof.
|
||||||
|
props = oop.Prop
|
||||||
|
nv := reflect.New(oop.Type.Elem())
|
||||||
|
dst = nv.Elem().Field(0)
|
||||||
|
field := sv.Field(oop.Field)
|
||||||
|
if !field.IsNil() {
|
||||||
|
return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
|
||||||
|
}
|
||||||
|
field.Set(nv)
|
||||||
|
}
|
||||||
|
if !dst.IsValid() {
|
||||||
|
return p.errorf("unknown field name %q in %v", name, st)
|
||||||
|
}
|
||||||
|
|
||||||
|
if dst.Kind() == reflect.Map {
|
||||||
|
// Consume any colon.
|
||||||
|
if err := p.checkForColon(props, dst.Type()); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Construct the map if it doesn't already exist.
|
||||||
|
if dst.IsNil() {
|
||||||
|
dst.Set(reflect.MakeMap(dst.Type()))
|
||||||
|
}
|
||||||
|
key := reflect.New(dst.Type().Key()).Elem()
|
||||||
|
val := reflect.New(dst.Type().Elem()).Elem()
|
||||||
|
|
||||||
|
// The map entry should be this sequence of tokens:
|
||||||
|
// < key : KEY value : VALUE >
|
||||||
|
// However, implementations may omit key or value, and technically
|
||||||
|
// we should support them in any order. See b/28924776 for a time
|
||||||
|
// this went wrong.
|
||||||
|
|
||||||
|
tok := p.next()
|
||||||
|
var terminator string
|
||||||
|
switch tok.value {
|
||||||
|
case "<":
|
||||||
|
terminator = ">"
|
||||||
|
case "{":
|
||||||
|
terminator = "}"
|
||||||
|
default:
|
||||||
|
return p.errorf("expected '{' or '<', found %q", tok.value)
|
||||||
|
}
|
||||||
|
for {
|
||||||
|
tok := p.next()
|
||||||
|
if tok.err != nil {
|
||||||
|
return tok.err
|
||||||
|
}
|
||||||
|
if tok.value == terminator {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
switch tok.value {
|
||||||
|
case "key":
|
||||||
|
if err := p.consumeToken(":"); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := p.readAny(key, props.mkeyprop); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := p.consumeOptionalSeparator(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
case "value":
|
||||||
|
if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := p.readAny(val, props.mvalprop); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := p.consumeOptionalSeparator(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
p.back()
|
||||||
|
return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
dst.SetMapIndex(key, val)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check that it's not already set if it's not a repeated field.
|
||||||
|
if !props.Repeated && fieldSet[name] {
|
||||||
|
return p.errorf("non-repeated field %q was repeated", name)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := p.checkForColon(props, dst.Type()); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse into the field.
|
||||||
|
fieldSet[name] = true
|
||||||
|
if err := p.readAny(dst, props); err != nil {
|
||||||
|
if _, ok := err.(*RequiredNotSetError); !ok {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
reqFieldErr = err
|
||||||
|
}
|
||||||
|
if props.Required {
|
||||||
|
reqCount--
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := p.consumeOptionalSeparator(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
if reqCount > 0 {
|
||||||
|
return p.missingRequiredFieldError(sv)
|
||||||
|
}
|
||||||
|
return reqFieldErr
|
||||||
|
}
|
||||||
|
|
||||||
|
// consumeExtName consumes extension name or expanded Any type URL and the
|
||||||
|
// following ']'. It returns the name or URL consumed.
|
||||||
|
func (p *textParser) consumeExtName() (string, error) {
|
||||||
|
tok := p.next()
|
||||||
|
if tok.err != nil {
|
||||||
|
return "", tok.err
|
||||||
|
}
|
||||||
|
|
||||||
|
// If extension name or type url is quoted, it's a single token.
|
||||||
|
if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
|
||||||
|
name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return name, p.consumeToken("]")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Consume everything up to "]"
|
||||||
|
var parts []string
|
||||||
|
for tok.value != "]" {
|
||||||
|
parts = append(parts, tok.value)
|
||||||
|
tok = p.next()
|
||||||
|
if tok.err != nil {
|
||||||
|
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return strings.Join(parts, ""), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// consumeOptionalSeparator consumes an optional semicolon or comma.
|
||||||
|
// It is used in readStruct to provide backward compatibility.
|
||||||
|
func (p *textParser) consumeOptionalSeparator() error {
|
||||||
|
tok := p.next()
|
||||||
|
if tok.err != nil {
|
||||||
|
return tok.err
|
||||||
|
}
|
||||||
|
if tok.value != ";" && tok.value != "," {
|
||||||
|
p.back()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
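// Illustrative note (added to this vendored copy, not upstream): this is what
// allows fields to be separated by ';' or ',', so `count:3;name:"Calvin"` and
// `count:4,name:"Ezekiel"` both parse.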
|
||||||
|
|
||||||
|
func (p *textParser) readAny(v reflect.Value, props *Properties) error {
|
||||||
|
tok := p.next()
|
||||||
|
if tok.err != nil {
|
||||||
|
return tok.err
|
||||||
|
}
|
||||||
|
if tok.value == "" {
|
||||||
|
return p.errorf("unexpected EOF")
|
||||||
|
}
|
||||||
|
|
||||||
|
switch fv := v; fv.Kind() {
|
||||||
|
case reflect.Slice:
|
||||||
|
at := v.Type()
|
||||||
|
if at.Elem().Kind() == reflect.Uint8 {
|
||||||
|
// Special case for []byte
|
||||||
|
if tok.value[0] != '"' && tok.value[0] != '\'' {
|
||||||
|
// Deliberately written out here, as the error after
|
||||||
|
// this switch statement would write "invalid []byte: ...",
|
||||||
|
// which is not as user-friendly.
|
||||||
|
return p.errorf("invalid string: %v", tok.value)
|
||||||
|
}
|
||||||
|
bytes := []byte(tok.unquoted)
|
||||||
|
fv.Set(reflect.ValueOf(bytes))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// Repeated field.
|
||||||
|
if tok.value == "[" {
|
||||||
|
// Repeated field with list notation, like [1,2,3].
|
||||||
|
for {
|
||||||
|
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
|
||||||
|
err := p.readAny(fv.Index(fv.Len()-1), props)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
tok := p.next()
|
||||||
|
if tok.err != nil {
|
||||||
|
return tok.err
|
||||||
|
}
|
||||||
|
if tok.value == "]" {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if tok.value != "," {
|
||||||
|
return p.errorf("Expected ']' or ',' found %q", tok.value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// One value of the repeated field.
|
||||||
|
p.back()
|
||||||
|
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
|
||||||
|
return p.readAny(fv.Index(fv.Len()-1), props)
|
||||||
|
case reflect.Bool:
|
||||||
|
// true/1/t/True or false/f/0/False.
|
||||||
|
switch tok.value {
|
||||||
|
case "true", "1", "t", "True":
|
||||||
|
fv.SetBool(true)
|
||||||
|
return nil
|
||||||
|
case "false", "0", "f", "False":
|
||||||
|
fv.SetBool(false)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
case reflect.Float32, reflect.Float64:
|
||||||
|
v := tok.value
|
||||||
|
// Ignore 'f' for compatibility with output generated by C++, but don't
|
||||||
|
// remove 'f' when the value is "-inf" or "inf".
|
||||||
|
if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
|
||||||
|
v = v[:len(v)-1]
|
||||||
|
}
|
||||||
|
if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
|
||||||
|
fv.SetFloat(f)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
case reflect.Int32:
|
||||||
|
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
|
||||||
|
fv.SetInt(x)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(props.Enum) == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
m, ok := enumValueMaps[props.Enum]
|
||||||
|
if !ok {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
x, ok := m[tok.value]
|
||||||
|
if !ok {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
fv.SetInt(int64(x))
|
||||||
|
return nil
|
||||||
|
case reflect.Int64:
|
||||||
|
if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
|
||||||
|
fv.SetInt(x)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
case reflect.Ptr:
|
||||||
|
// A basic field (indirected through pointer), or a repeated message/group
|
||||||
|
p.back()
|
||||||
|
fv.Set(reflect.New(fv.Type().Elem()))
|
||||||
|
return p.readAny(fv.Elem(), props)
|
||||||
|
case reflect.String:
|
||||||
|
if tok.value[0] == '"' || tok.value[0] == '\'' {
|
||||||
|
fv.SetString(tok.unquoted)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
case reflect.Struct:
|
||||||
|
var terminator string
|
||||||
|
switch tok.value {
|
||||||
|
case "{":
|
||||||
|
terminator = "}"
|
||||||
|
case "<":
|
||||||
|
terminator = ">"
|
||||||
|
default:
|
||||||
|
return p.errorf("expected '{' or '<', found %q", tok.value)
|
||||||
|
}
|
||||||
|
// TODO: Handle nested messages which implement encoding.TextUnmarshaler.
|
||||||
|
return p.readStruct(fv, terminator)
|
||||||
|
case reflect.Uint32:
|
||||||
|
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
|
||||||
|
fv.SetUint(uint64(x))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
case reflect.Uint64:
|
||||||
|
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
|
||||||
|
fv.SetUint(x)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return p.errorf("invalid %v: %v", v.Type(), tok.value)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
|
||||||
|
// before starting to unmarshal, so any existing data in pb is always removed.
|
||||||
|
// If a required field is not set and no other error occurs,
|
||||||
|
// UnmarshalText returns *RequiredNotSetError.
|
||||||
|
func UnmarshalText(s string, pb Message) error {
|
||||||
|
if um, ok := pb.(encoding.TextUnmarshaler); ok {
|
||||||
|
err := um.UnmarshalText([]byte(s))
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
pb.Reset()
|
||||||
|
v := reflect.ValueOf(pb)
|
||||||
|
if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil {
|
||||||
|
return pe
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
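// exampleUnmarshalText is an illustrative sketch added to this vendored copy
// (not part of upstream golang/protobuf); the function name is made up. It
// shows the error contract documented above: a *RequiredNotSetError means the
// text parsed but a required field was absent, while other failures are
// reported with a line/offset prefix such as
//	line 1.7: invalid int32: "12"
func exampleUnmarshalText(s string, pb Message) (requiredMissing bool, err error) {
	err = UnmarshalText(s, pb)
	if err == nil {
		return false, nil
	}
	if _, ok := err.(*RequiredNotSetError); ok {
		// pb still holds the fields that were successfully parsed.
		return true, err
	}
	return false, err
}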
|
673
vendor/src/github.com/golang/protobuf/proto/text_parser_test.go
vendored
Normal file
|
@ -0,0 +1,673 @@
|
||||||
|
// Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
//
|
||||||
|
// Copyright 2010 The Go Authors. All rights reserved.
|
||||||
|
// https://github.com/golang/protobuf
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
package proto_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math"
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
. "github.com/golang/protobuf/proto"
|
||||||
|
proto3pb "github.com/golang/protobuf/proto/proto3_proto"
|
||||||
|
. "github.com/golang/protobuf/proto/testdata"
|
||||||
|
)
|
||||||
|
|
||||||
|
type UnmarshalTextTest struct {
|
||||||
|
in string
|
||||||
|
err string // if "", no error expected
|
||||||
|
out *MyMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildExtStructTest(text string) UnmarshalTextTest {
|
||||||
|
msg := &MyMessage{
|
||||||
|
Count: Int32(42),
|
||||||
|
}
|
||||||
|
SetExtension(msg, E_Ext_More, &Ext{
|
||||||
|
Data: String("Hello, world!"),
|
||||||
|
})
|
||||||
|
return UnmarshalTextTest{in: text, out: msg}
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildExtDataTest(text string) UnmarshalTextTest {
|
||||||
|
msg := &MyMessage{
|
||||||
|
Count: Int32(42),
|
||||||
|
}
|
||||||
|
SetExtension(msg, E_Ext_Text, String("Hello, world!"))
|
||||||
|
SetExtension(msg, E_Ext_Number, Int32(1729))
|
||||||
|
return UnmarshalTextTest{in: text, out: msg}
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildExtRepStringTest(text string) UnmarshalTextTest {
|
||||||
|
msg := &MyMessage{
|
||||||
|
Count: Int32(42),
|
||||||
|
}
|
||||||
|
if err := SetExtension(msg, E_Greeting, []string{"bula", "hola"}); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return UnmarshalTextTest{in: text, out: msg}
|
||||||
|
}
|
||||||
|
|
||||||
|
var unMarshalTextTests = []UnmarshalTextTest{
|
||||||
|
// Basic
|
||||||
|
{
|
||||||
|
in: " count:42\n name:\"Dave\" ",
|
||||||
|
out: &MyMessage{
|
||||||
|
Count: Int32(42),
|
||||||
|
Name: String("Dave"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Empty quoted string
|
||||||
|
{
|
||||||
|
in: `count:42 name:""`,
|
||||||
|
out: &MyMessage{
|
||||||
|
Count: Int32(42),
|
||||||
|
Name: String(""),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Quoted string concatenation with double quotes
|
||||||
|
{
|
||||||
|
in: `count:42 name: "My name is "` + "\n" + `"elsewhere"`,
|
||||||
|
out: &MyMessage{
|
||||||
|
Count: Int32(42),
|
||||||
|
Name: String("My name is elsewhere"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Quoted string concatenation with single quotes
|
||||||
|
{
|
||||||
|
in: "count:42 name: 'My name is '\n'elsewhere'",
|
||||||
|
out: &MyMessage{
|
||||||
|
Count: Int32(42),
|
||||||
|
Name: String("My name is elsewhere"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Quoted string concatenations with mixed quotes
|
||||||
|
{
|
||||||
|
in: "count:42 name: 'My name is '\n\"elsewhere\"",
|
||||||
|
out: &MyMessage{
|
||||||
|
Count: Int32(42),
|
||||||
|
Name: String("My name is elsewhere"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
in: "count:42 name: \"My name is \"\n'elsewhere'",
|
||||||
|
out: &MyMessage{
|
||||||
|
Count: Int32(42),
|
||||||
|
Name: String("My name is elsewhere"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Quoted string with escaped apostrophe
|
||||||
|
{
|
||||||
|
in: `count:42 name: "HOLIDAY - New Year\'s Day"`,
|
||||||
|
out: &MyMessage{
|
||||||
|
Count: Int32(42),
|
||||||
|
Name: String("HOLIDAY - New Year's Day"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Quoted string with single quote
|
||||||
|
{
|
||||||
|
in: `count:42 name: 'Roger "The Ramster" Ramjet'`,
|
||||||
|
out: &MyMessage{
|
||||||
|
Count: Int32(42),
|
||||||
|
Name: String(`Roger "The Ramster" Ramjet`),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Quoted string with all the accepted special characters from the C++ test
|
||||||
|
{
|
||||||
|
in: `count:42 name: ` + "\"\\\"A string with \\' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"",
|
||||||
|
out: &MyMessage{
|
||||||
|
Count: Int32(42),
|
||||||
|
Name: String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Quoted string with quoted backslash
|
||||||
|
{
|
||||||
|
in: `count:42 name: "\\'xyz"`,
|
||||||
|
out: &MyMessage{
|
||||||
|
Count: Int32(42),
|
||||||
|
Name: String(`\'xyz`),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Quoted string with UTF-8 bytes.
|
||||||
|
{
|
||||||
|
in: "count:42 name: '\303\277\302\201\xAB'",
|
||||||
|
out: &MyMessage{
|
||||||
|
Count: Int32(42),
|
||||||
|
Name: String("\303\277\302\201\xAB"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Bad quoted string
|
||||||
|
{
|
||||||
|
in: `inner: < host: "\0" >` + "\n",
|
||||||
|
err: `line 1.15: invalid quoted string "\0": \0 requires 2 following digits`,
|
||||||
|
},
|
||||||
|
|
||||||
|
// Number too large for int64
|
||||||
|
{
|
||||||
|
in: "count: 1 others { key: 123456789012345678901 }",
|
||||||
|
err: "line 1.23: invalid int64: 123456789012345678901",
|
||||||
|
},
|
||||||
|
|
||||||
|
// Number too large for int32
|
||||||
|
{
|
||||||
|
in: "count: 1234567890123",
|
||||||
|
err: "line 1.7: invalid int32: 1234567890123",
|
||||||
|
},
|
||||||
|
|
||||||
|
// Number in hexadecimal
|
||||||
|
{
|
||||||
|
in: "count: 0x2beef",
|
||||||
|
out: &MyMessage{
|
||||||
|
Count: Int32(0x2beef),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Number in octal
|
||||||
|
{
|
||||||
|
in: "count: 024601",
|
||||||
|
out: &MyMessage{
|
||||||
|
Count: Int32(024601),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Floating point number with "f" suffix
|
||||||
|
{
|
||||||
|
in: "count: 4 others:< weight: 17.0f >",
|
||||||
|
out: &MyMessage{
|
||||||
|
Count: Int32(4),
|
||||||
|
Others: []*OtherMessage{
|
||||||
|
{
|
||||||
|
Weight: Float32(17),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Floating point positive infinity
|
||||||
|
{
|
||||||
|
in: "count: 4 bigfloat: inf",
|
||||||
|
out: &MyMessage{
|
||||||
|
Count: Int32(4),
|
||||||
|
Bigfloat: Float64(math.Inf(1)),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Floating point negative infinity
|
||||||
|
{
|
||||||
|
in: "count: 4 bigfloat: -inf",
|
||||||
|
out: &MyMessage{
|
||||||
|
Count: Int32(4),
|
||||||
|
Bigfloat: Float64(math.Inf(-1)),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Number too large for float32
|
||||||
|
{
|
||||||
|
in: "others:< weight: 12345678901234567890123456789012345678901234567890 >",
|
||||||
|
err: "line 1.17: invalid float32: 12345678901234567890123456789012345678901234567890",
|
||||||
|
},
|
||||||
|
|
||||||
|
// Number posing as a quoted string
|
||||||
|
{
|
||||||
|
in: `inner: < host: 12 >` + "\n",
|
||||||
|
err: `line 1.15: invalid string: 12`,
|
||||||
|
},
|
||||||
|
|
||||||
|
// Quoted string posing as int32
|
||||||
|
{
|
||||||
|
in: `count: "12"`,
|
||||||
|
err: `line 1.7: invalid int32: "12"`,
|
||||||
|
},
|
||||||
|
|
||||||
|
// Quoted string posing a float32
|
||||||
|
{
|
||||||
|
in: `others:< weight: "17.4" >`,
|
||||||
|
err: `line 1.17: invalid float32: "17.4"`,
|
||||||
|
},
|
||||||
|
|
||||||
|
// Enum
|
||||||
|
{
|
||||||
|
in: `count:42 bikeshed: BLUE`,
|
||||||
|
out: &MyMessage{
|
||||||
|
Count: Int32(42),
|
||||||
|
Bikeshed: MyMessage_BLUE.Enum(),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Repeated field
|
||||||
|
{
|
||||||
|
in: `count:42 pet: "horsey" pet:"bunny"`,
|
||||||
|
out: &MyMessage{
|
||||||
|
Count: Int32(42),
|
||||||
|
Pet: []string{"horsey", "bunny"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Repeated field with list notation
|
||||||
|
{
|
||||||
|
in: `count:42 pet: ["horsey", "bunny"]`,
|
||||||
|
out: &MyMessage{
|
||||||
|
Count: Int32(42),
|
||||||
|
Pet: []string{"horsey", "bunny"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Repeated message with/without colon and <>/{}
|
||||||
|
{
|
||||||
|
in: `count:42 others:{} others{} others:<> others:{}`,
|
||||||
|
out: &MyMessage{
|
||||||
|
Count: Int32(42),
|
||||||
|
Others: []*OtherMessage{
|
||||||
|
{},
|
||||||
|
{},
|
||||||
|
{},
|
||||||
|
{},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Missing colon for inner message
|
||||||
|
{
|
||||||
|
in: `count:42 inner < host: "cauchy.syd" >`,
|
||||||
|
out: &MyMessage{
|
||||||
|
Count: Int32(42),
|
||||||
|
Inner: &InnerMessage{
|
||||||
|
Host: String("cauchy.syd"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Missing colon for string field
|
||||||
|
{
|
||||||
|
in: `name "Dave"`,
|
||||||
|
err: `line 1.5: expected ':', found "\"Dave\""`,
|
||||||
|
},
|
||||||
|
|
||||||
|
// Missing colon for int32 field
|
||||||
|
{
|
||||||
|
in: `count 42`,
|
||||||
|
err: `line 1.6: expected ':', found "42"`,
|
||||||
|
},
|
||||||
|
|
||||||
|
// Missing required field
|
||||||
|
{
|
||||||
|
in: `name: "Pawel"`,
|
||||||
|
err: `proto: required field "testdata.MyMessage.count" not set`,
|
||||||
|
out: &MyMessage{
|
||||||
|
Name: String("Pawel"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Missing required field in a required submessage
|
||||||
|
{
|
||||||
|
in: `count: 42 we_must_go_deeper < leo_finally_won_an_oscar <> >`,
|
||||||
|
err: `proto: required field "testdata.InnerMessage.host" not set`,
|
||||||
|
out: &MyMessage{
|
||||||
|
Count: Int32(42),
|
||||||
|
WeMustGoDeeper: &RequiredInnerMessage{LeoFinallyWonAnOscar: &InnerMessage{}},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Repeated non-repeated field
|
||||||
|
{
|
||||||
|
in: `name: "Rob" name: "Russ"`,
|
||||||
|
err: `line 1.12: non-repeated field "name" was repeated`,
|
||||||
|
},
|
||||||
|
|
||||||
|
// Group
|
||||||
|
{
|
||||||
|
in: `count: 17 SomeGroup { group_field: 12 }`,
|
||||||
|
out: &MyMessage{
|
||||||
|
Count: Int32(17),
|
||||||
|
Somegroup: &MyMessage_SomeGroup{
|
||||||
|
GroupField: Int32(12),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Semicolon between fields
|
||||||
|
{
|
||||||
|
in: `count:3;name:"Calvin"`,
|
||||||
|
out: &MyMessage{
|
||||||
|
Count: Int32(3),
|
||||||
|
Name: String("Calvin"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
// Comma between fields
|
||||||
|
{
|
||||||
|
in: `count:4,name:"Ezekiel"`,
|
||||||
|
out: &MyMessage{
|
||||||
|
Count: Int32(4),
|
||||||
|
Name: String("Ezekiel"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Boolean false
|
||||||
|
{
|
||||||
|
in: `count:42 inner { host: "example.com" connected: false }`,
|
||||||
|
out: &MyMessage{
|
||||||
|
Count: Int32(42),
|
||||||
|
Inner: &InnerMessage{
|
||||||
|
Host: String("example.com"),
|
||||||
|
Connected: Bool(false),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
// Boolean true
|
||||||
|
{
|
||||||
|
in: `count:42 inner { host: "example.com" connected: true }`,
|
||||||
|
out: &MyMessage{
|
||||||
|
Count: Int32(42),
|
||||||
|
Inner: &InnerMessage{
|
||||||
|
Host: String("example.com"),
|
||||||
|
Connected: Bool(true),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
// Boolean 0
|
||||||
|
{
|
||||||
|
in: `count:42 inner { host: "example.com" connected: 0 }`,
|
||||||
|
out: &MyMessage{
|
||||||
|
Count: Int32(42),
|
||||||
|
Inner: &InnerMessage{
|
||||||
|
Host: String("example.com"),
|
||||||
|
Connected: Bool(false),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
// Boolean 1
|
||||||
|
{
|
||||||
|
in: `count:42 inner { host: "example.com" connected: 1 }`,
|
||||||
|
out: &MyMessage{
|
||||||
|
Count: Int32(42),
|
||||||
|
Inner: &InnerMessage{
|
||||||
|
Host: String("example.com"),
|
||||||
|
Connected: Bool(true),
|
			},
		},
	},
	// Boolean f
	{
		in: `count:42 inner { host: "example.com" connected: f }`,
		out: &MyMessage{
			Count: Int32(42),
			Inner: &InnerMessage{
				Host:      String("example.com"),
				Connected: Bool(false),
			},
		},
	},
	// Boolean t
	{
		in: `count:42 inner { host: "example.com" connected: t }`,
		out: &MyMessage{
			Count: Int32(42),
			Inner: &InnerMessage{
				Host:      String("example.com"),
				Connected: Bool(true),
			},
		},
	},
	// Boolean False
	{
		in: `count:42 inner { host: "example.com" connected: False }`,
		out: &MyMessage{
			Count: Int32(42),
			Inner: &InnerMessage{
				Host:      String("example.com"),
				Connected: Bool(false),
			},
		},
	},
	// Boolean True
	{
		in: `count:42 inner { host: "example.com" connected: True }`,
		out: &MyMessage{
			Count: Int32(42),
			Inner: &InnerMessage{
				Host:      String("example.com"),
				Connected: Bool(true),
			},
		},
	},

	// Extension
	buildExtStructTest(`count: 42 [testdata.Ext.more]:<data:"Hello, world!" >`),
	buildExtStructTest(`count: 42 [testdata.Ext.more] {data:"Hello, world!"}`),
	buildExtDataTest(`count: 42 [testdata.Ext.text]:"Hello, world!" [testdata.Ext.number]:1729`),
	buildExtRepStringTest(`count: 42 [testdata.greeting]:"bula" [testdata.greeting]:"hola"`),

	// Big all-in-one
	{
		in: "count:42 # Meaning\n" +
			`name:"Dave" ` +
			`quote:"\"I didn't want to go.\"" ` +
			`pet:"bunny" ` +
			`pet:"kitty" ` +
			`pet:"horsey" ` +
			`inner:<` +
			`  host:"footrest.syd" ` +
			`  port:7001 ` +
			`  connected:true ` +
			`> ` +
			`others:<` +
			`  key:3735928559 ` +
			`  value:"\x01A\a\f" ` +
			`> ` +
			`others:<` +
			"  weight:58.9  # Atomic weight of Co\n" +
			`  inner:<` +
			`    host:"lesha.mtv" ` +
			`    port:8002 ` +
			`  >` +
			`>`,
		out: &MyMessage{
			Count: Int32(42),
			Name:  String("Dave"),
			Quote: String(`"I didn't want to go."`),
			Pet:   []string{"bunny", "kitty", "horsey"},
			Inner: &InnerMessage{
				Host:      String("footrest.syd"),
				Port:      Int32(7001),
				Connected: Bool(true),
			},
			Others: []*OtherMessage{
				{
					Key:   Int64(3735928559),
					Value: []byte{0x1, 'A', '\a', '\f'},
				},
				{
					Weight: Float32(58.9),
					Inner: &InnerMessage{
						Host: String("lesha.mtv"),
						Port: Int32(8002),
					},
				},
			},
		},
	},
}

func TestUnmarshalText(t *testing.T) {
	for i, test := range unMarshalTextTests {
		pb := new(MyMessage)
		err := UnmarshalText(test.in, pb)
		if test.err == "" {
			// We don't expect failure.
			if err != nil {
				t.Errorf("Test %d: Unexpected error: %v", i, err)
			} else if !reflect.DeepEqual(pb, test.out) {
				t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v",
					i, pb, test.out)
			}
		} else {
			// We do expect failure.
			if err == nil {
				t.Errorf("Test %d: Didn't get expected error: %v", i, test.err)
			} else if err.Error() != test.err {
				t.Errorf("Test %d: Incorrect error.\nHave: %v\nWant: %v",
					i, err.Error(), test.err)
			} else if _, ok := err.(*RequiredNotSetError); ok && test.out != nil && !reflect.DeepEqual(pb, test.out) {
				t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v",
					i, pb, test.out)
			}
		}
	}
}

func TestUnmarshalTextCustomMessage(t *testing.T) {
	msg := &textMessage{}
	if err := UnmarshalText("custom", msg); err != nil {
		t.Errorf("Unexpected error from custom unmarshal: %v", err)
	}
	if UnmarshalText("not custom", msg) == nil {
		t.Errorf("Didn't get expected error from custom unmarshal")
	}
}

// Regression test; this caused a panic.
func TestRepeatedEnum(t *testing.T) {
	pb := new(RepeatedEnum)
	if err := UnmarshalText("color: RED", pb); err != nil {
		t.Fatal(err)
	}
	exp := &RepeatedEnum{
		Color: []RepeatedEnum_Color{RepeatedEnum_RED},
	}
	if !Equal(pb, exp) {
		t.Errorf("Incorrect populated \nHave: %v\nWant: %v", pb, exp)
	}
}

func TestProto3TextParsing(t *testing.T) {
	m := new(proto3pb.Message)
	const in = `name: "Wallace" true_scotsman: true`
	want := &proto3pb.Message{
		Name:         "Wallace",
		TrueScotsman: true,
	}
	if err := UnmarshalText(in, m); err != nil {
		t.Fatal(err)
	}
	if !Equal(m, want) {
		t.Errorf("\n got %v\nwant %v", m, want)
	}
}

func TestMapParsing(t *testing.T) {
	m := new(MessageWithMap)
	const in = `name_mapping:<key:1234 value:"Feist"> name_mapping:<key:1 value:"Beatles">` +
		`msg_mapping:<key:-4, value:<f: 2.0>,>` + // separating commas are okay
		`msg_mapping<key:-2 value<f: 4.0>>` + // no colon after "value"
		`msg_mapping:<value:<f: 5.0>>` + // omitted key
		`msg_mapping:<key:1>` + // omitted value
		`byte_mapping:<key:true value:"so be it">` +
		`byte_mapping:<>` // omitted key and value
	want := &MessageWithMap{
		NameMapping: map[int32]string{
			1:    "Beatles",
			1234: "Feist",
		},
		MsgMapping: map[int64]*FloatingPoint{
			-4: {F: Float64(2.0)},
			-2: {F: Float64(4.0)},
			0:  {F: Float64(5.0)},
			1:  nil,
		},
		ByteMapping: map[bool][]byte{
			false: nil,
			true:  []byte("so be it"),
		},
	}
	if err := UnmarshalText(in, m); err != nil {
		t.Fatal(err)
	}
	if !Equal(m, want) {
		t.Errorf("\n got %v\nwant %v", m, want)
	}
}

func TestOneofParsing(t *testing.T) {
	const in = `name:"Shrek"`
	m := new(Communique)
	want := &Communique{Union: &Communique_Name{"Shrek"}}
	if err := UnmarshalText(in, m); err != nil {
		t.Fatal(err)
	}
	if !Equal(m, want) {
		t.Errorf("\n got %v\nwant %v", m, want)
	}

	const inOverwrite = `name:"Shrek" number:42`
	m = new(Communique)
	testErr := "line 1.13: field 'number' would overwrite already parsed oneof 'Union'"
	if err := UnmarshalText(inOverwrite, m); err == nil {
		t.Errorf("TestOneofParsing: Didn't get expected error: %v", testErr)
	} else if err.Error() != testErr {
		t.Errorf("TestOneofParsing: Incorrect error.\nHave: %v\nWant: %v",
			err.Error(), testErr)
	}

}

var benchInput string

func init() {
	benchInput = "count: 4\n"
	for i := 0; i < 1000; i++ {
		benchInput += "pet: \"fido\"\n"
	}

	// Check it is valid input.
	pb := new(MyMessage)
	err := UnmarshalText(benchInput, pb)
	if err != nil {
		panic("Bad benchmark input: " + err.Error())
	}
}

func BenchmarkUnmarshalText(b *testing.B) {
	pb := new(MyMessage)
	for i := 0; i < b.N; i++ {
		UnmarshalText(benchInput, pb)
	}
	b.SetBytes(int64(len(benchInput)))
}
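For orientation only (this sketch is not part of the vendored diff): the table above exercises the text-format parser's edge cases (true/false spelled t/f or True/False, extensions, comments). Assuming the vendored testdata package is importable as below, the same behaviour is reachable through the public API:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "github.com/golang/protobuf/proto/testdata"
)

func main() {
	msg := new(pb.MyMessage)
	// Booleans in the text format may be spelled true/false, t/f, or True/False,
	// as the test table above verifies.
	in := `count: 42 inner { host: "example.com" connected: t }`
	if err := proto.UnmarshalText(in, msg); err != nil {
		panic(err)
	}
	fmt.Println(proto.CompactTextString(msg))
}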
474 vendor/src/github.com/golang/protobuf/proto/text_test.go vendored Normal file
@@ -0,0 +1,474 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors.  All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package proto_test

import (
	"bytes"
	"errors"
	"io/ioutil"
	"math"
	"strings"
	"testing"

	"github.com/golang/protobuf/proto"

	proto3pb "github.com/golang/protobuf/proto/proto3_proto"
	pb "github.com/golang/protobuf/proto/testdata"
)

// textMessage implements the methods that allow it to marshal and unmarshal
// itself as text.
type textMessage struct {
}

func (*textMessage) MarshalText() ([]byte, error) {
	return []byte("custom"), nil
}

func (*textMessage) UnmarshalText(bytes []byte) error {
	if string(bytes) != "custom" {
		return errors.New("expected 'custom'")
	}
	return nil
}

func (*textMessage) Reset()         {}
func (*textMessage) String() string { return "" }
func (*textMessage) ProtoMessage()  {}

func newTestMessage() *pb.MyMessage {
	msg := &pb.MyMessage{
		Count: proto.Int32(42),
		Name:  proto.String("Dave"),
		Quote: proto.String(`"I didn't want to go."`),
		Pet:   []string{"bunny", "kitty", "horsey"},
		Inner: &pb.InnerMessage{
			Host:      proto.String("footrest.syd"),
			Port:      proto.Int32(7001),
			Connected: proto.Bool(true),
		},
		Others: []*pb.OtherMessage{
			{
				Key:   proto.Int64(0xdeadbeef),
				Value: []byte{1, 65, 7, 12},
			},
			{
				Weight: proto.Float32(6.022),
				Inner: &pb.InnerMessage{
					Host: proto.String("lesha.mtv"),
					Port: proto.Int32(8002),
				},
			},
		},
		Bikeshed: pb.MyMessage_BLUE.Enum(),
		Somegroup: &pb.MyMessage_SomeGroup{
			GroupField: proto.Int32(8),
		},
		// One normally wouldn't do this.
		// This is an undeclared tag 13, as a varint (wire type 0) with value 4.
		XXX_unrecognized: []byte{13<<3 | 0, 4},
	}
	ext := &pb.Ext{
		Data: proto.String("Big gobs for big rats"),
	}
	if err := proto.SetExtension(msg, pb.E_Ext_More, ext); err != nil {
		panic(err)
	}
	greetings := []string{"adg", "easy", "cow"}
	if err := proto.SetExtension(msg, pb.E_Greeting, greetings); err != nil {
		panic(err)
	}

	// Add an unknown extension. We marshal a pb.Ext, and fake the ID.
	b, err := proto.Marshal(&pb.Ext{Data: proto.String("3G skiing")})
	if err != nil {
		panic(err)
	}
	b = append(proto.EncodeVarint(201<<3|proto.WireBytes), b...)
	proto.SetRawExtension(msg, 201, b)

	// Extensions can be plain fields, too, so let's test that.
	b = append(proto.EncodeVarint(202<<3|proto.WireVarint), 19)
	proto.SetRawExtension(msg, 202, b)

	return msg
}

const text = `count: 42
name: "Dave"
quote: "\"I didn't want to go.\""
pet: "bunny"
pet: "kitty"
pet: "horsey"
inner: <
  host: "footrest.syd"
  port: 7001
  connected: true
>
others: <
  key: 3735928559
  value: "\001A\007\014"
>
others: <
  weight: 6.022
  inner: <
    host: "lesha.mtv"
    port: 8002
  >
>
bikeshed: BLUE
SomeGroup {
  group_field: 8
}
/* 2 unknown bytes */
13: 4
[testdata.Ext.more]: <
  data: "Big gobs for big rats"
>
[testdata.greeting]: "adg"
[testdata.greeting]: "easy"
[testdata.greeting]: "cow"
/* 13 unknown bytes */
201: "\t3G skiing"
/* 3 unknown bytes */
202: 19
`

func TestMarshalText(t *testing.T) {
	buf := new(bytes.Buffer)
	if err := proto.MarshalText(buf, newTestMessage()); err != nil {
		t.Fatalf("proto.MarshalText: %v", err)
	}
	s := buf.String()
	if s != text {
		t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, text)
	}
}

func TestMarshalTextCustomMessage(t *testing.T) {
	buf := new(bytes.Buffer)
	if err := proto.MarshalText(buf, &textMessage{}); err != nil {
		t.Fatalf("proto.MarshalText: %v", err)
	}
	s := buf.String()
	if s != "custom" {
		t.Errorf("Got %q, expected %q", s, "custom")
	}
}
func TestMarshalTextNil(t *testing.T) {
	want := "<nil>"
	tests := []proto.Message{nil, (*pb.MyMessage)(nil)}
	for i, test := range tests {
		buf := new(bytes.Buffer)
		if err := proto.MarshalText(buf, test); err != nil {
			t.Fatal(err)
		}
		if got := buf.String(); got != want {
			t.Errorf("%d: got %q want %q", i, got, want)
		}
	}
}

func TestMarshalTextUnknownEnum(t *testing.T) {
	// The Color enum only specifies values 0-2.
	m := &pb.MyMessage{Bikeshed: pb.MyMessage_Color(3).Enum()}
	got := m.String()
	const want = `bikeshed:3 `
	if got != want {
		t.Errorf("\n got %q\nwant %q", got, want)
	}
}

func TestTextOneof(t *testing.T) {
	tests := []struct {
		m    proto.Message
		want string
	}{
		// zero message
		{&pb.Communique{}, ``},
		// scalar field
		{&pb.Communique{Union: &pb.Communique_Number{4}}, `number:4`},
		// message field
		{&pb.Communique{Union: &pb.Communique_Msg{
			&pb.Strings{StringField: proto.String("why hello!")},
		}}, `msg:<string_field:"why hello!" >`},
		// bad oneof (should not panic)
		{&pb.Communique{Union: &pb.Communique_Msg{nil}}, `msg:/* nil */`},
	}
	for _, test := range tests {
		got := strings.TrimSpace(test.m.String())
		if got != test.want {
			t.Errorf("\n got %s\nwant %s", got, test.want)
		}
	}
}

func BenchmarkMarshalTextBuffered(b *testing.B) {
	buf := new(bytes.Buffer)
	m := newTestMessage()
	for i := 0; i < b.N; i++ {
		buf.Reset()
		proto.MarshalText(buf, m)
	}
}

func BenchmarkMarshalTextUnbuffered(b *testing.B) {
	w := ioutil.Discard
	m := newTestMessage()
	for i := 0; i < b.N; i++ {
		proto.MarshalText(w, m)
	}
}

func compact(src string) string {
	// s/[ \n]+/ /g; s/ $//;
	dst := make([]byte, len(src))
	space, comment := false, false
	j := 0
	for i := 0; i < len(src); i++ {
		if strings.HasPrefix(src[i:], "/*") {
			comment = true
			i++
			continue
		}
		if comment && strings.HasPrefix(src[i:], "*/") {
			comment = false
			i++
			continue
		}
		if comment {
			continue
		}
		c := src[i]
		if c == ' ' || c == '\n' {
			space = true
			continue
		}
		if j > 0 && (dst[j-1] == ':' || dst[j-1] == '<' || dst[j-1] == '{') {
			space = false
		}
		if c == '{' {
			space = false
		}
		if space {
			dst[j] = ' '
			j++
			space = false
		}
		dst[j] = c
		j++
	}
	if space {
		dst[j] = ' '
		j++
	}
	return string(dst[0:j])
}

var compactText = compact(text)

func TestCompactText(t *testing.T) {
	s := proto.CompactTextString(newTestMessage())
	if s != compactText {
		t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v\n===\n", s, compactText)
	}
}

func TestStringEscaping(t *testing.T) {
	testCases := []struct {
		in  *pb.Strings
		out string
	}{
		{
			// Test data from C++ test (TextFormatTest.StringEscape).
			// Single divergence: we don't escape apostrophes.
			&pb.Strings{StringField: proto.String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and  multiple   spaces")},
			"string_field: \"\\\"A string with ' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and  multiple   spaces\"\n",
		},
		{
			// Test data from the same C++ test.
			&pb.Strings{StringField: proto.String("\350\260\267\346\255\214")},
			"string_field: \"\\350\\260\\267\\346\\255\\214\"\n",
		},
		{
			// Some UTF-8.
			&pb.Strings{StringField: proto.String("\x00\x01\xff\x81")},
			`string_field: "\000\001\377\201"` + "\n",
		},
	}

	for i, tc := range testCases {
		var buf bytes.Buffer
		if err := proto.MarshalText(&buf, tc.in); err != nil {
			t.Errorf("proto.MarsalText: %v", err)
			continue
		}
		s := buf.String()
		if s != tc.out {
			t.Errorf("#%d: Got:\n%s\nExpected:\n%s\n", i, s, tc.out)
			continue
		}

		// Check round-trip.
		pb := new(pb.Strings)
		if err := proto.UnmarshalText(s, pb); err != nil {
			t.Errorf("#%d: UnmarshalText: %v", i, err)
			continue
		}
		if !proto.Equal(pb, tc.in) {
			t.Errorf("#%d: Round-trip failed:\nstart: %v\n  end: %v", i, tc.in, pb)
		}
	}
}

// A limitedWriter accepts some output before it fails.
// This is a proxy for something like a nearly-full or imminently-failing disk,
// or a network connection that is about to die.
type limitedWriter struct {
	b     bytes.Buffer
	limit int
}

var outOfSpace = errors.New("proto: insufficient space")

func (w *limitedWriter) Write(p []byte) (n int, err error) {
	var avail = w.limit - w.b.Len()
	if avail <= 0 {
		return 0, outOfSpace
	}
	if len(p) <= avail {
		return w.b.Write(p)
	}
	n, _ = w.b.Write(p[:avail])
	return n, outOfSpace
}

func TestMarshalTextFailing(t *testing.T) {
	// Try lots of different sizes to exercise more error code-paths.
	for lim := 0; lim < len(text); lim++ {
		buf := new(limitedWriter)
		buf.limit = lim
		err := proto.MarshalText(buf, newTestMessage())
		// We expect a certain error, but also some partial results in the buffer.
		if err != outOfSpace {
			t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", err, outOfSpace)
		}
		s := buf.b.String()
		x := text[:buf.limit]
		if s != x {
			t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, x)
		}
	}
}

func TestFloats(t *testing.T) {
	tests := []struct {
		f    float64
		want string
	}{
		{0, "0"},
		{4.7, "4.7"},
		{math.Inf(1), "inf"},
		{math.Inf(-1), "-inf"},
		{math.NaN(), "nan"},
	}
	for _, test := range tests {
		msg := &pb.FloatingPoint{F: &test.f}
		got := strings.TrimSpace(msg.String())
		want := `f:` + test.want
		if got != want {
			t.Errorf("f=%f: got %q, want %q", test.f, got, want)
		}
	}
}

func TestRepeatedNilText(t *testing.T) {
	m := &pb.MessageList{
		Message: []*pb.MessageList_Message{
			nil,
			&pb.MessageList_Message{
				Name: proto.String("Horse"),
			},
			nil,
		},
	}
	want := `Message <nil>
Message {
  name: "Horse"
}
Message <nil>
`
	if s := proto.MarshalTextString(m); s != want {
		t.Errorf(" got: %s\nwant: %s", s, want)
	}
}

func TestProto3Text(t *testing.T) {
	tests := []struct {
		m    proto.Message
		want string
	}{
		// zero message
		{&proto3pb.Message{}, ``},
		// zero message except for an empty byte slice
		{&proto3pb.Message{Data: []byte{}}, ``},
		// trivial case
		{&proto3pb.Message{Name: "Rob", HeightInCm: 175}, `name:"Rob" height_in_cm:175`},
		// empty map
		{&pb.MessageWithMap{}, ``},
		// non-empty map; map format is the same as a repeated struct,
		// and they are sorted by key (numerically for numeric keys).
		{
			&pb.MessageWithMap{NameMapping: map[int32]string{
				-1:      "Negatory",
				7:       "Lucky",
				1234:    "Feist",
				6345789: "Otis",
			}},
			`name_mapping:<key:-1 value:"Negatory" > ` +
				`name_mapping:<key:7 value:"Lucky" > ` +
				`name_mapping:<key:1234 value:"Feist" > ` +
				`name_mapping:<key:6345789 value:"Otis" >`,
		},
		// map with nil value; not well-defined, but we shouldn't crash
		{
			&pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{7: nil}},
			`msg_mapping:<key:7 >`,
		},
	}
	for _, test := range tests {
		got := strings.TrimSpace(test.m.String())
		if got != test.want {
			t.Errorf("\n got %s\nwant %s", got, test.want)
		}
	}
}
201 vendor/src/github.com/matrix-org/util/LICENSE vendored Normal file
@@ -0,0 +1,201 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "{}"
      replaced with your own identifying information. (Don't include
      the brackets!)  The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright {yyyy} {name of copyright owner}

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
7 vendor/src/github.com/matrix-org/util/README.md vendored Normal file
@@ -0,0 +1,7 @@
# util

[![GoDoc](https://godoc.org/github.com/matrix-org/util?status.svg)](https://godoc.org/github.com/matrix-org/util)
[![Build Status](https://travis-ci.org/matrix-org/util.svg?branch=master)](https://travis-ci.org/matrix-org/util)
[![Coverage Status](https://coveralls.io/repos/github/matrix-org/util/badge.svg)](https://coveralls.io/github/matrix-org/util)

A loose collection of Golang functions that we use at matrix.org
18 vendor/src/github.com/matrix-org/util/error.go vendored Normal file
@@ -0,0 +1,18 @@
package util

import "fmt"

// HTTPError An HTTP Error response, which may wrap an underlying native Go Error.
type HTTPError struct {
	WrappedError error
	Message      string
	Code         int
}

func (e HTTPError) Error() string {
	var wrappedErrMsg string
	if e.WrappedError != nil {
		wrappedErrMsg = e.WrappedError.Error()
	}
	return fmt.Sprintf("%s: %d: %s", e.Message, e.Code, wrappedErrMsg)
}
5 vendor/src/github.com/matrix-org/util/hooks/install.sh vendored Normal file
@@ -0,0 +1,5 @@
#! /bin/bash

DOT_GIT="$(dirname $0)/../.git"

ln -s "../../hooks/pre-commit" "$DOT_GIT/hooks/pre-commit"
9 vendor/src/github.com/matrix-org/util/hooks/pre-commit vendored Normal file
@@ -0,0 +1,9 @@
#! /bin/bash

set -eu

golint
go fmt
go tool vet --all --shadow .
gocyclo -over 12 .
go test -timeout 5s -test.v
154 vendor/src/github.com/matrix-org/util/json.go vendored Normal file
@@ -0,0 +1,154 @@
package util

import (
	"context"
	"encoding/json"
	"fmt"
	"math/rand"
	"net/http"
	"runtime/debug"

	log "github.com/Sirupsen/logrus"
)

// ContextKeys is a type alias for string to namespace Context keys per-package.
type ContextKeys string

// CtxValueLogger is the key to extract the logrus Logger.
const CtxValueLogger = ContextKeys("logger")

// JSONRequestHandler represents an interface that must be satisfied in order to respond to incoming
// HTTP requests with JSON. The interface returned will be marshalled into JSON to be sent to the client,
// unless the interface is []byte in which case the bytes are sent to the client unchanged.
// If an error is returned, a JSON error response will also be returned, unless the error code
// is a 302 REDIRECT in which case a redirect is sent based on the Message field.
type JSONRequestHandler interface {
	OnIncomingRequest(req *http.Request) (interface{}, *HTTPError)
}

// JSONError represents a JSON API error response
type JSONError struct {
	Message string `json:"message"`
}

// Protect panicking HTTP requests from taking down the entire process, and log them using
// the correct logger, returning a 500 with a JSON response rather than abruptly closing the
// connection. The http.Request MUST have a CtxValueLogger.
func Protect(handler http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, req *http.Request) {
		defer func() {
			if r := recover(); r != nil {
				logger := req.Context().Value(CtxValueLogger).(*log.Entry)
				logger.WithFields(log.Fields{
					"panic": r,
				}).Errorf(
					"Request panicked!\n%s", debug.Stack(),
				)
				jsonErrorResponse(
					w, req, &HTTPError{nil, "Internal Server Error", 500},
				)
			}
		}()
		handler(w, req)
	}
}

// MakeJSONAPI creates an HTTP handler which always responds to incoming requests with JSON responses.
// Incoming http.Requests will have a logger (with a request ID/method/path logged) attached to the Context.
// This can be accessed via the const CtxValueLogger. The type of the logger is *log.Entry from github.com/Sirupsen/logrus
func MakeJSONAPI(handler JSONRequestHandler) http.HandlerFunc {
	return Protect(func(w http.ResponseWriter, req *http.Request) {
		// Set a Logger on the context
		ctx := context.WithValue(req.Context(), CtxValueLogger, log.WithFields(log.Fields{
			"req.method": req.Method,
			"req.path":   req.URL.Path,
			"req.id":     RandomString(12),
		}))
		req = req.WithContext(ctx)

		logger := req.Context().Value(CtxValueLogger).(*log.Entry)
		logger.Print("Incoming request")

		res, httpErr := handler.OnIncomingRequest(req)

		// Set common headers returned regardless of the outcome of the request
		w.Header().Set("Content-Type", "application/json")
		SetCORSHeaders(w)

		if httpErr != nil {
			jsonErrorResponse(w, req, httpErr)
			return
		}

		// if they've returned bytes as the response, then just return them rather than marshalling as JSON.
		// This gives handlers an escape hatch if they want to return cached bytes.
		var resBytes []byte
		resBytes, ok := res.([]byte)
		if !ok {
			r, err := json.Marshal(res)
			if err != nil {
				jsonErrorResponse(w, req, &HTTPError{nil, "Failed to serialise response as JSON", 500})
				return
			}
			resBytes = r
		}
		logger.Print(fmt.Sprintf("Responding (%d bytes)", len(resBytes)))
		w.Write(resBytes)
	})
}

func jsonErrorResponse(w http.ResponseWriter, req *http.Request, httpErr *HTTPError) {
	logger := req.Context().Value(CtxValueLogger).(*log.Entry)
	if httpErr.Code == 302 {
		logger.WithField("err", httpErr.Error()).Print("Redirecting")
		http.Redirect(w, req, httpErr.Message, 302)
		return
	}
	logger.WithFields(log.Fields{
		log.ErrorKey: httpErr,
	}).Print("Responding with error")

	w.WriteHeader(httpErr.Code) // Set response code

	r, err := json.Marshal(&JSONError{
		Message: httpErr.Message,
	})
	if err != nil {
		// We should never fail to marshal the JSON error response, but in this event just skip
		// marshalling altogether
		logger.Warn("Failed to marshal error response")
		w.Write([]byte(`{}`))
		return
	}
	w.Write(r)
}

// WithCORSOptions intercepts all OPTIONS requests and responds with CORS headers. The request handler
// is not invoked when this happens.
func WithCORSOptions(handler http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, req *http.Request) {
		if req.Method == "OPTIONS" {
			SetCORSHeaders(w)
			return
		}
		handler(w, req)
	}
}

// SetCORSHeaders sets unrestricted origin Access-Control headers on the response writer
func SetCORSHeaders(w http.ResponseWriter) {
	w.Header().Set("Access-Control-Allow-Origin", "*")
	w.Header().Set("Access-Control-Allow-Methods", "GET, POST, PUT, DELETE, OPTIONS")
	w.Header().Set("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept")
}

const alphanumerics = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"

// RandomString generates a pseudo-random string of length n.
func RandomString(n int) string {
	b := make([]byte, n)
	for i := range b {
		b[i] = alphanumerics[rand.Int63()%int64(len(alphanumerics))]
	}
	return string(b)
}
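For orientation (not part of this commit): a minimal sketch of how a caller might wire the helpers above into net/http. The handler type and route below are illustrative assumptions, not code from the library.

package main

import (
	"net/http"

	"github.com/matrix-org/util"
)

// helloHandler is a hypothetical handler satisfying util.JSONRequestHandler;
// whatever OnIncomingRequest returns is marshalled to JSON by MakeJSONAPI.
type helloHandler struct{}

func (*helloHandler) OnIncomingRequest(req *http.Request) (interface{}, *util.HTTPError) {
	return map[string]string{"hello": "world"}, nil
}

func main() {
	// MakeJSONAPI attaches the request logger and panic protection;
	// WithCORSOptions answers OPTIONS preflight requests directly.
	http.HandleFunc("/hello", util.WithCORSOptions(util.MakeJSONAPI(&helloHandler{})))
	http.ListenAndServe(":8080", nil)
}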
99 vendor/src/github.com/matrix-org/util/json_test.go vendored Normal file
@@ -0,0 +1,99 @@
package util

import (
	"context"
	"net/http"
	"net/http/httptest"
	"testing"

	log "github.com/Sirupsen/logrus"
)

type MockJSONRequestHandler struct {
	handler func(req *http.Request) (interface{}, *HTTPError)
}

func (h *MockJSONRequestHandler) OnIncomingRequest(req *http.Request) (interface{}, *HTTPError) {
	return h.handler(req)
}

type MockResponse struct {
	Foo string `json:"foo"`
}

func TestMakeJSONAPI(t *testing.T) {
	log.SetLevel(log.PanicLevel) // suppress logs in test output
	tests := []struct {
		Return     interface{}
		Err        *HTTPError
		ExpectCode int
		ExpectJSON string
	}{
		{nil, &HTTPError{nil, "Everything is broken", 500}, 500, `{"message":"Everything is broken"}`}, // Error return values
		{nil, &HTTPError{nil, "Not here", 404}, 404, `{"message":"Not here"}`},                         // With different status codes
		{&MockResponse{"yep"}, nil, 200, `{"foo":"yep"}`},                                              // Success return values
		{[]MockResponse{{"yep"}, {"narp"}}, nil, 200, `[{"foo":"yep"},{"foo":"narp"}]`},                // Top-level array success values
		{[]byte(`actually bytes`), nil, 200, `actually bytes`},                                         // raw []byte escape hatch
		{func(cannotBe, marshalled string) {}, nil, 500, `{"message":"Failed to serialise response as JSON"}`}, // impossible marshal
	}

	for _, tst := range tests {
		mock := MockJSONRequestHandler{func(req *http.Request) (interface{}, *HTTPError) {
			return tst.Return, tst.Err
		}}
		mockReq, _ := http.NewRequest("GET", "http://example.com/foo", nil)
		mockWriter := httptest.NewRecorder()
		handlerFunc := MakeJSONAPI(&mock)
		handlerFunc(mockWriter, mockReq)
		if mockWriter.Code != tst.ExpectCode {
			t.Errorf("TestMakeJSONAPI wanted HTTP status %d, got %d", tst.ExpectCode, mockWriter.Code)
		}
		actualBody := mockWriter.Body.String()
		if actualBody != tst.ExpectJSON {
			t.Errorf("TestMakeJSONAPI wanted body '%s', got '%s'", tst.ExpectJSON, actualBody)
		}
	}
}

func TestMakeJSONAPIRedirect(t *testing.T) {
	log.SetLevel(log.PanicLevel) // suppress logs in test output
	mock := MockJSONRequestHandler{func(req *http.Request) (interface{}, *HTTPError) {
		return nil, &HTTPError{nil, "https://matrix.org", 302}
	}}
	mockReq, _ := http.NewRequest("GET", "http://example.com/foo", nil)
	mockWriter := httptest.NewRecorder()
	handlerFunc := MakeJSONAPI(&mock)
	handlerFunc(mockWriter, mockReq)
	if mockWriter.Code != 302 {
		t.Errorf("TestMakeJSONAPIRedirect wanted HTTP status 302, got %d", mockWriter.Code)
	}
	location := mockWriter.Header().Get("Location")
	if location != "https://matrix.org" {
		t.Errorf("TestMakeJSONAPIRedirect wanted Location header 'https://matrix.org', got '%s'", location)
	}
}

func TestProtect(t *testing.T) {
	log.SetLevel(log.PanicLevel) // suppress logs in test output
	mockWriter := httptest.NewRecorder()
	mockReq, _ := http.NewRequest("GET", "http://example.com/foo", nil)
	mockReq = mockReq.WithContext(
		context.WithValue(mockReq.Context(), CtxValueLogger, log.WithField("test", "yep")),
	)
	h := Protect(func(w http.ResponseWriter, req *http.Request) {
		panic("oh noes!")
	})

	h(mockWriter, mockReq)

	expectCode := 500
	if mockWriter.Code != expectCode {
		t.Errorf("TestProtect wanted HTTP status %d, got %d", expectCode, mockWriter.Code)
	}

	expectBody := `{"message":"Internal Server Error"}`
	actualBody := mockWriter.Body.String()
	if actualBody != expectBody {
		t.Errorf("TestProtect wanted body %s, got %s", expectBody, actualBody)
	}
}
7 vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile vendored Normal file
@@ -0,0 +1,7 @@
all:

cover:
	go test -cover -v -coverprofile=cover.dat ./...
	go tool cover -func cover.dat

.PHONY: cover
178 vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/all_test.go vendored Normal file
@@ -0,0 +1,178 @@
// Copyright 2013 Matt T. Proud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package pbutil

import (
	"bytes"
	"testing"

	"github.com/golang/protobuf/proto"

	. "github.com/matttproud/golang_protobuf_extensions/testdata"
)

func TestWriteDelimited(t *testing.T) {
	t.Parallel()
	for _, test := range []struct {
		msg proto.Message
		buf []byte
		n   int
		err error
	}{
		{
			msg: &Empty{},
			n:   1,
			buf: []byte{0},
		},
		{
			msg: &GoEnum{Foo: FOO_FOO1.Enum()},
			n:   3,
			buf: []byte{2, 8, 1},
		},
		{
			msg: &Strings{
				StringField: proto.String(`This is my gigantic, unhappy string.  It exceeds
the encoding size of a single byte varint.  We are using it to fuzz test the
correctness of the header decoding mechanisms, which may prove problematic.
I expect it may.  Let's hope you enjoy testing as much as we do.`),
			},
			n: 271,
			buf: []byte{141, 2, 10, 138, 2, 84, 104, 105, 115, 32, 105, 115, 32, 109,
				121, 32, 103, 105, 103, 97, 110, 116, 105, 99, 44, 32, 117, 110, 104,
				97, 112, 112, 121, 32, 115, 116, 114, 105, 110, 103, 46, 32, 32, 73,
				116, 32, 101, 120, 99, 101, 101, 100, 115, 10, 116, 104, 101, 32, 101,
				110, 99, 111, 100, 105, 110, 103, 32, 115, 105, 122, 101, 32, 111, 102,
				32, 97, 32, 115, 105, 110, 103, 108, 101, 32, 98, 121, 116, 101, 32,
				118, 97, 114, 105, 110, 116, 46, 32, 32, 87, 101, 32, 97, 114, 101, 32,
				117, 115, 105, 110, 103, 32, 105, 116, 32, 116, 111, 32, 102, 117, 122,
				122, 32, 116, 101, 115, 116, 32, 116, 104, 101, 10, 99, 111, 114, 114,
				101, 99, 116, 110, 101, 115, 115, 32, 111, 102, 32, 116, 104, 101, 32,
				104, 101, 97, 100, 101, 114, 32, 100, 101, 99, 111, 100, 105, 110, 103,
				32, 109, 101, 99, 104, 97, 110, 105, 115, 109, 115, 44, 32, 119, 104,
				105, 99, 104, 32, 109, 97, 121, 32, 112, 114, 111, 118, 101, 32, 112,
				114, 111, 98, 108, 101, 109, 97, 116, 105, 99, 46, 10, 73, 32, 101, 120,
				112, 101, 99, 116, 32, 105, 116, 32, 109, 97, 121, 46, 32, 32, 76, 101,
				116, 39, 115, 32, 104, 111, 112, 101, 32, 121, 111, 117, 32, 101, 110,
				106, 111, 121, 32, 116, 101, 115, 116, 105, 110, 103, 32, 97, 115, 32,
				109, 117, 99, 104, 32, 97, 115, 32, 119, 101, 32, 100, 111, 46},
		},
	} {
		var buf bytes.Buffer
		if n, err := WriteDelimited(&buf, test.msg); n != test.n || err != test.err {
			t.Fatalf("WriteDelimited(buf, %#v) = %v, %v; want %v, %v", test.msg, n, err, test.n, test.err)
		}
		if out := buf.Bytes(); !bytes.Equal(out, test.buf) {
			t.Fatalf("WriteDelimited(buf, %#v); buf = %v; want %v", test.msg, out, test.buf)
		}
	}
}

func TestReadDelimited(t *testing.T) {
	t.Parallel()
	for _, test := range []struct {
		buf []byte
		msg proto.Message
		n   int
		err error
	}{
		{
			buf: []byte{0},
			msg: &Empty{},
			n:   1,
		},
		{
			n:   3,
			buf: []byte{2, 8, 1},
			msg: &GoEnum{Foo: FOO_FOO1.Enum()},
		},
		{
			buf: []byte{141, 2, 10, 138, 2, 84, 104, 105, 115, 32, 105, 115, 32, 109,
				121, 32, 103, 105, 103, 97, 110, 116, 105, 99, 44, 32, 117, 110, 104,
				97, 112, 112, 121, 32, 115, 116, 114, 105, 110, 103, 46, 32, 32, 73,
				116, 32, 101, 120, 99, 101, 101, 100, 115, 10, 116, 104, 101, 32, 101,
				110, 99, 111, 100, 105, 110, 103, 32, 115, 105, 122, 101, 32, 111, 102,
				32, 97, 32, 115, 105, 110, 103, 108, 101, 32, 98, 121, 116, 101, 32,
				118, 97, 114, 105, 110, 116, 46, 32, 32, 87, 101, 32, 97, 114, 101, 32,
				117, 115, 105, 110, 103, 32, 105, 116, 32, 116, 111, 32, 102, 117, 122,
				122, 32, 116, 101, 115, 116, 32, 116, 104, 101, 10, 99, 111, 114, 114,
				101, 99, 116, 110, 101, 115, 115, 32, 111, 102, 32, 116, 104, 101, 32,
				104, 101, 97, 100, 101, 114, 32, 100, 101, 99, 111, 100, 105, 110, 103,
				32, 109, 101, 99, 104, 97, 110, 105, 115, 109, 115, 44, 32, 119, 104,
				105, 99, 104, 32, 109, 97, 121, 32, 112, 114, 111, 118, 101, 32, 112,
				114, 111, 98, 108, 101, 109, 97, 116, 105, 99, 46, 10, 73, 32, 101, 120,
				112, 101, 99, 116, 32, 105, 116, 32, 109, 97, 121, 46, 32, 32, 76, 101,
				116, 39, 115, 32, 104, 111, 112, 101, 32, 121, 111, 117, 32, 101, 110,
				106, 111, 121, 32, 116, 101, 115, 116, 105, 110, 103, 32, 97, 115, 32,
				109, 117, 99, 104, 32, 97, 115, 32, 119, 101, 32, 100, 111, 46},
			msg: &Strings{
				StringField: proto.String(`This is my gigantic, unhappy string.  It exceeds
the encoding size of a single byte varint.  We are using it to fuzz test the
correctness of the header decoding mechanisms, which may prove problematic.
I expect it may.  Let's hope you enjoy testing as much as we do.`),
			},
			n: 271,
		},
	} {
		msg := proto.Clone(test.msg)
		msg.Reset()
		if n, err := ReadDelimited(bytes.NewBuffer(test.buf), msg); n != test.n || err != test.err {
			t.Fatalf("ReadDelimited(%v, msg) = %v, %v; want %v, %v", test.buf, n, err, test.n, test.err)
		}
		if !proto.Equal(msg, test.msg) {
			t.Fatalf("ReadDelimited(%v, msg); msg = %v; want %v", test.buf, msg, test.msg)
		}
	}
}

func TestEndToEndValid(t *testing.T) {
	t.Parallel()
	for _, test := range [][]proto.Message{
		{&Empty{}},
		{&GoEnum{Foo: FOO_FOO1.Enum()}, &Empty{}, &GoEnum{Foo: FOO_FOO1.Enum()}},
		{&GoEnum{Foo: FOO_FOO1.Enum()}},
		{&Strings{
			StringField: proto.String(`This is my gigantic, unhappy string.  It exceeds
the encoding size of a single byte varint.  We are using it to fuzz test the
correctness of the header decoding mechanisms, which may prove problematic.
I expect it may.  Let's hope you enjoy testing as much as we do.`),
		}},
	} {
		var buf bytes.Buffer
		var written int
		for i, msg := range test {
			n, err := WriteDelimited(&buf, msg)
			if err != nil {
				// Assumption: TestReadDelimited and TestWriteDelimited are sufficient
				// and inputs for this test are explicitly exercised there.
				t.Fatalf("WriteDelimited(buf, %v[%d]) = ?, %v; wanted ?, nil", test, i, err)
			}
			written += n
		}
		var read int
		for i, msg := range test {
			out := proto.Clone(msg)
			out.Reset()
			n, _ := ReadDelimited(&buf, out)
			// Decide to do EOF checking?
			read += n
			if !proto.Equal(out, msg) {
				t.Fatalf("out = %v; want %v[%d] = %#v", out, test, i, msg)
			}
		}
		if read != written {
			t.Fatalf("%v read = %d; want %d", test, read, written)
		}
	}
}
75 vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go vendored Normal file
@@ -0,0 +1,75 @@
// Copyright 2013 Matt T. Proud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package pbutil

import (
	"encoding/binary"
	"errors"
	"io"

	"github.com/golang/protobuf/proto"
)

var errInvalidVarint = errors.New("invalid varint32 encountered")

// ReadDelimited decodes a message from the provided length-delimited stream,
// where the length is encoded as 32-bit varint prefix to the message body.
// It returns the total number of bytes read and any applicable error. This is
// roughly equivalent to the companion Java API's
// MessageLite#parseDelimitedFrom. As per the reader contract, this function
// calls r.Read repeatedly as required until exactly one message including its
// prefix is read and decoded (or an error has occurred). The function never
// reads more bytes from the stream than required. The function never returns
// an error if a message has been read and decoded correctly, even if the end
// of the stream has been reached in doing so. In that case, any subsequent
// calls return (0, io.EOF).
func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) {
	// Per AbstractParser#parsePartialDelimitedFrom with
	// CodedInputStream#readRawVarint32.
	var headerBuf [binary.MaxVarintLen32]byte
	var bytesRead, varIntBytes int
	var messageLength uint64
	for varIntBytes == 0 { // i.e. no varint has been decoded yet.
		if bytesRead >= len(headerBuf) {
			return bytesRead, errInvalidVarint
		}
		// We have to read byte by byte here to avoid reading more bytes
		// than required. Each read byte is appended to what we have
		// read before.
		newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1])
		if newBytesRead == 0 {
			if err != nil {
				return bytesRead, err
			}
			// A Reader should not return (0, nil), but if it does,
			// it should be treated as no-op (according to the
			// Reader contract). So let's go on...
			continue
		}
		bytesRead += newBytesRead
		// Now present everything read so far to the varint decoder and
		// see if a varint can be decoded already.
		messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead])
	}

	messageBuf := make([]byte, messageLength)
	newBytesRead, err := io.ReadFull(r, messageBuf)
	bytesRead += newBytesRead
	if err != nil {
		return bytesRead, err
	}

	return bytesRead, proto.Unmarshal(messageBuf, m)
}
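The ReadDelimited contract documented above (exactly one varint-prefixed message decoded per call, then (0, io.EOF) once the stream is drained) is easiest to see in a read loop. The following is an editor's sketch, not part of the vendored diff; the generated message type pb.Entry and its import path are hypothetical placeholders standing in for real protoc-generated code.

// Editor's sketch: draining a stream of length-delimited records with
// pbutil.ReadDelimited until io.EOF is reported.
package main

import (
	"fmt"
	"io"
	"log"
	"os"

	"github.com/matttproud/golang_protobuf_extensions/pbutil"

	pb "example.com/gen/entrypb" // hypothetical generated protobuf package
)

func main() {
	f, err := os.Open("entries.bin") // hypothetical file of delimited records
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	for {
		msg := &pb.Entry{} // hypothetical generated message type
		if _, err := pbutil.ReadDelimited(f, msg); err == io.EOF {
			break // ReadDelimited returns (0, io.EOF) once the stream is exhausted.
		} else if err != nil {
			log.Fatal(err)
		}
		fmt.Println(msg)
	}
}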
99 vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/decode_test.go vendored Normal file
@@ -0,0 +1,99 @@
|
||||||
|
// Copyright 2016 Matt T. Proud
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package pbutil
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"io"
|
||||||
|
"testing"
|
||||||
|
"testing/iotest"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestReadDelimitedIllegalVarint(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
var tests = []struct {
|
||||||
|
in []byte
|
||||||
|
n int
|
||||||
|
err error
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
in: []byte{255, 255, 255, 255, 255},
|
||||||
|
n: 5,
|
||||||
|
err: errInvalidVarint,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
in: []byte{255, 255, 255, 255, 255, 255},
|
||||||
|
n: 5,
|
||||||
|
err: errInvalidVarint,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, test := range tests {
|
||||||
|
n, err := ReadDelimited(bytes.NewReader(test.in), nil)
|
||||||
|
if got, want := n, test.n; got != want {
|
||||||
|
t.Errorf("ReadDelimited(%#v, nil) = %#v, ?; want = %v#, ?", test.in, got, want)
|
||||||
|
}
|
||||||
|
if got, want := err, test.err; got != want {
|
||||||
|
t.Errorf("ReadDelimited(%#v, nil) = ?, %#v; want = ?, %#v", test.in, got, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestReadDelimitedPrematureHeader(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
var data = []byte{128, 5} // 256 + 256 + 128
|
||||||
|
n, err := ReadDelimited(bytes.NewReader(data[0:1]), nil)
|
||||||
|
if got, want := n, 1; got != want {
|
||||||
|
t.Errorf("ReadDelimited(%#v, nil) = %#v, ?; want = %v#, ?", data[0:1], got, want)
|
||||||
|
}
|
||||||
|
if got, want := err, io.EOF; got != want {
|
||||||
|
t.Errorf("ReadDelimited(%#v, nil) = ?, %#v; want = ?, %#v", data[0:1], got, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestReadDelimitedPrematureBody(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
var data = []byte{128, 5, 0, 0, 0} // 256 + 256 + 128
|
||||||
|
n, err := ReadDelimited(bytes.NewReader(data[:]), nil)
|
||||||
|
if got, want := n, 5; got != want {
|
||||||
|
t.Errorf("ReadDelimited(%#v, nil) = %#v, ?; want = %v#, ?", data, got, want)
|
||||||
|
}
|
||||||
|
if got, want := err, io.ErrUnexpectedEOF; got != want {
|
||||||
|
t.Errorf("ReadDelimited(%#v, nil) = ?, %#v; want = ?, %#v", data, got, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestReadDelimitedPrematureHeaderIncremental(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
var data = []byte{128, 5} // 256 + 256 + 128
|
||||||
|
n, err := ReadDelimited(iotest.OneByteReader(bytes.NewReader(data[0:1])), nil)
|
||||||
|
if got, want := n, 1; got != want {
|
||||||
|
t.Errorf("ReadDelimited(%#v, nil) = %#v, ?; want = %v#, ?", data[0:1], got, want)
|
||||||
|
}
|
||||||
|
if got, want := err, io.EOF; got != want {
|
||||||
|
t.Errorf("ReadDelimited(%#v, nil) = ?, %#v; want = ?, %#v", data[0:1], got, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestReadDelimitedPrematureBodyIncremental(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
var data = []byte{128, 5, 0, 0, 0} // 256 + 256 + 128
|
||||||
|
n, err := ReadDelimited(iotest.OneByteReader(bytes.NewReader(data[:])), nil)
|
||||||
|
if got, want := n, 5; got != want {
|
||||||
|
t.Errorf("ReadDelimited(%#v, nil) = %#v, ?; want = %v#, ?", data, got, want)
|
||||||
|
}
|
||||||
|
if got, want := err, io.ErrUnexpectedEOF; got != want {
|
||||||
|
t.Errorf("ReadDelimited(%#v, nil) = ?, %#v; want = ?, %#v", data, got, want)
|
||||||
|
}
|
||||||
|
}
|
16 vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go vendored Normal file
@@ -0,0 +1,16 @@
// Copyright 2013 Matt T. Proud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package pbutil provides record length-delimited Protocol Buffer streaming.
package pbutil
46 vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go vendored Normal file
@@ -0,0 +1,46 @@
// Copyright 2013 Matt T. Proud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package pbutil

import (
	"encoding/binary"
	"io"

	"github.com/golang/protobuf/proto"
)

// WriteDelimited encodes and dumps a message to the provided writer prefixed
// with a 32-bit varint indicating the length of the encoded message, producing
// a length-delimited record stream, which can be used to chain together
// encoded messages of the same type in a file. It returns the total number of
// bytes written and any applicable error. This is roughly equivalent to the
// companion Java API's MessageLite#writeDelimitedTo.
func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) {
	buffer, err := proto.Marshal(m)
	if err != nil {
		return 0, err
	}

	var buf [binary.MaxVarintLen32]byte
	encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer)))

	sync, err := w.Write(buf[:encodedLength])
	if err != nil {
		return sync, err
	}

	n, err = w.Write(buffer)
	return n + sync, err
}
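WriteDelimited writes the varint length prefix first and the marshaled body second, so a message whose encoding is under 128 bytes costs one extra prefix byte on the wire, while, say, a 300-byte encoding costs two (302 bytes total). Below is an editor's round-trip sketch pairing it with ReadDelimited from decode.go; it is not part of the vendored diff, and the generated message type pb.Entry is again a hypothetical placeholder.

// Editor's sketch: WriteDelimited/ReadDelimited round trip through an
// in-memory buffer.
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/matttproud/golang_protobuf_extensions/pbutil"

	pb "example.com/gen/entrypb" // hypothetical generated protobuf package
)

func main() {
	var buf bytes.Buffer

	in := &pb.Entry{} // hypothetical generated message type; fill fields as needed
	written, err := pbutil.WriteDelimited(&buf, in)
	if err != nil {
		log.Fatal(err)
	}
	// written is the marshaled size plus 1-5 varint prefix bytes.

	out := &pb.Entry{}
	read, err := pbutil.ReadDelimited(&buf, out)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(written == read) // true: the reader consumes exactly what was written
}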
67 vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/encode_test.go vendored Normal file
@@ -0,0 +1,67 @@
|
||||||
|
// Copyright 2016 Matt T. Proud
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package pbutil
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/golang/protobuf/proto"
|
||||||
|
)
|
||||||
|
|
||||||
|
var errMarshal = errors.New("pbutil: can't marshal")
|
||||||
|
|
||||||
|
type cantMarshal struct{ proto.Message }
|
||||||
|
|
||||||
|
func (cantMarshal) Marshal() ([]byte, error) { return nil, errMarshal }
|
||||||
|
|
||||||
|
var _ proto.Message = cantMarshal{}
|
||||||
|
|
||||||
|
func TestWriteDelimitedMarshalErr(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
var data cantMarshal
|
||||||
|
var buf bytes.Buffer
|
||||||
|
n, err := WriteDelimited(&buf, data)
|
||||||
|
if got, want := n, 0; got != want {
|
||||||
|
t.Errorf("WriteDelimited(buf, %#v) = %#v, ?; want = %v#, ?", data, got, want)
|
||||||
|
}
|
||||||
|
if got, want := err, errMarshal; got != want {
|
||||||
|
t.Errorf("WriteDelimited(buf, %#v) = ?, %#v; want = ?, %#v", data, got, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type canMarshal struct{ proto.Message }
|
||||||
|
|
||||||
|
func (canMarshal) Marshal() ([]byte, error) { return []byte{0, 1, 2, 3, 4, 5}, nil }
|
||||||
|
|
||||||
|
var errWrite = errors.New("pbutil: can't write")
|
||||||
|
|
||||||
|
type cantWrite struct{}
|
||||||
|
|
||||||
|
func (cantWrite) Write([]byte) (int, error) { return 0, errWrite }
|
||||||
|
|
||||||
|
func TestWriteDelimitedWriteErr(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
var data canMarshal
|
||||||
|
var buf cantWrite
|
||||||
|
n, err := WriteDelimited(buf, data)
|
||||||
|
if got, want := n, 0; got != want {
|
||||||
|
t.Errorf("WriteDelimited(buf, %#v) = %#v, ?; want = %v#, ?", data, got, want)
|
||||||
|
}
|
||||||
|
if got, want := err, errWrite; got != want {
|
||||||
|
t.Errorf("WriteDelimited(buf, %#v) = ?, %#v; want = ?, %#v", data, got, want)
|
||||||
|
}
|
||||||
|
}
|
11 vendor/src/github.com/prometheus/client_golang/AUTHORS.md vendored Normal file
@@ -0,0 +1,11 @@
|
||||||
|
The Prometheus project was started by Matt T. Proud (emeritus) and
|
||||||
|
Julius Volz in 2012.
|
||||||
|
|
||||||
|
Maintainers of this repository:
|
||||||
|
|
||||||
|
* Björn Rabenstein <beorn@soundcloud.com>
|
||||||
|
|
||||||
|
More than [30 individuals][1] have contributed to this repository. Please refer
|
||||||
|
to the Git commit log for a complete list.
|
||||||
|
|
||||||
|
[1]: https://github.com/prometheus/client_golang/graphs/contributors
|
109 vendor/src/github.com/prometheus/client_golang/CHANGELOG.md vendored Normal file
@@ -0,0 +1,109 @@
|
||||||
|
## 0.8.0 / 2016-08-17
|
||||||
|
* [CHANGE] Registry is doing more consistency checks. This might break
|
||||||
|
existing setups that used to export inconsistent metrics.
|
||||||
|
* [CHANGE] Pushing to Pushgateway moved to package `push` and changed to allow
|
||||||
|
arbitrary grouping.
|
||||||
|
* [CHANGE] Removed `SelfCollector`.
|
||||||
|
* [CHANGE] Removed `PanicOnCollectError` and `EnableCollectChecks` methods.
|
||||||
|
* [CHANGE] Moved packages to the prometheus/common repo: `text`, `model`,
|
||||||
|
`extraction`.
|
||||||
|
* [CHANGE] Deprecated a number of functions.
|
||||||
|
* [FEATURE] Allow custom registries. Added `Registerer` and `Gatherer`
|
||||||
|
interfaces.
|
||||||
|
* [FEATURE] Separated HTTP exposition, allowing custom HTTP handlers (package
|
||||||
|
`promhttp`) and enabling the creation of other exposition mechanisms.
|
||||||
|
* [FEATURE] `MustRegister` is variadic now, allowing registration of many
|
||||||
|
collectors in one call.
|
||||||
|
* [FEATURE] Added HTTP API v1 package.
|
||||||
|
* [ENHANCEMENT] Numerous documentation improvements.
|
||||||
|
* [ENHANCEMENT] Improved metric sorting.
|
||||||
|
* [ENHANCEMENT] Inlined fnv64a hashing for improved performance.
|
||||||
|
* [ENHANCEMENT] Several test improvements.
|
||||||
|
* [BUGFIX] Handle collisions in MetricVec.
|
||||||
|
|
||||||
|
## 0.7.0 / 2015-07-27
|
||||||
|
* [CHANGE] Rename ExporterLabelPrefix to ExportedLabelPrefix.
|
||||||
|
* [BUGFIX] Closed gaps in metric consistency check.
|
||||||
|
* [BUGFIX] Validate LabelName/LabelSet on JSON unmarshaling.
|
||||||
|
* [ENHANCEMENT] Document the possibility to create "empty" metrics in
|
||||||
|
a metric vector.
|
||||||
|
* [ENHANCEMENT] Fix and clarify various doc comments and the README.md.
|
||||||
|
* [ENHANCEMENT] (Kind of) solve "The Proxy Problem" of http.InstrumentHandler.
|
||||||
|
* [ENHANCEMENT] Change responseWriterDelegator.written to int64.
|
||||||
|
|
||||||
|
## 0.6.0 / 2015-06-01
|
||||||
|
* [CHANGE] Rename process_goroutines to go_goroutines.
|
||||||
|
* [ENHANCEMENT] Validate label names during YAML decoding.
|
||||||
|
* [ENHANCEMENT] Add LabelName regular expression.
|
||||||
|
* [BUGFIX] Ensure alignment of struct members for 32-bit systems.
|
||||||
|
|
||||||
|
## 0.5.0 / 2015-05-06
|
||||||
|
* [BUGFIX] Removed a weakness in the fingerprinting aka signature code.
|
||||||
|
This makes fingerprinting slower and more allocation-heavy, but the
|
||||||
|
weakness was too severe to be tolerated.
|
||||||
|
* [CHANGE] As a result of the above, Metric.Fingerprint is now returning
|
||||||
|
a different fingerprint. To keep the same fingerprint, the new method
|
||||||
|
Metric.FastFingerprint was introduced, which will be used by the
|
||||||
|
Prometheus server for storage purposes (implying that a collision
|
||||||
|
detection has to be added, too).
|
||||||
|
* [ENHANCEMENT] The Metric.Equal and Metric.Before do not depend on
|
||||||
|
fingerprinting anymore, removing the possibility of an undetected
|
||||||
|
fingerprint collision.
|
||||||
|
* [FEATURE] The Go collector in the exposition library includes garbage
|
||||||
|
collection stats.
|
||||||
|
* [FEATURE] The exposition library allows to create constant "throw-away"
|
||||||
|
summaries and histograms.
|
||||||
|
* [CHANGE] A number of new reserved labels and prefixes.
|
||||||
|
|
||||||
|
## 0.4.0 / 2015-04-08
|
||||||
|
* [CHANGE] Return NaN when Summaries have no observations yet.
|
||||||
|
* [BUGFIX] Properly handle Summary decay upon Write().
|
||||||
|
* [BUGFIX] Fix the documentation link to the consumption library.
|
||||||
|
* [FEATURE] Allow the metric family injection hook to merge with existing
|
||||||
|
metric families.
|
||||||
|
* [ENHANCEMENT] Removed cgo dependency and conditional compilation of procfs.
|
||||||
|
* [MAINTENANCE] Adjusted to changes in matttproud/golang_protobuf_extensions.
|
||||||
|
|
||||||
|
## 0.3.2 / 2015-03-11
|
||||||
|
* [BUGFIX] Fixed the receiver type of COWMetric.Set(). This method is
|
||||||
|
only used by the Prometheus server internally.
|
||||||
|
* [CLEANUP] Added licenses of vendored code left out by godep.
|
||||||
|
|
||||||
|
## 0.3.1 / 2015-03-04
|
||||||
|
* [ENHANCEMENT] Switched fingerprinting functions from own free list to
|
||||||
|
sync.Pool.
|
||||||
|
* [CHANGE] Makefile uses Go 1.4.2 now (only relevant for examples and tests).
|
||||||
|
|
||||||
|
## 0.3.0 / 2015-03-03
|
||||||
|
* [CHANGE] Changed the fingerprinting for metrics. THIS WILL INVALIDATE ALL
|
||||||
|
PERSISTED FINGERPRINTS. IF YOU COMPILE THE PROMETHEUS SERVER WITH THIS
|
||||||
|
VERSION, YOU HAVE TO WIPE THE PREVIOUSLY CREATED STORAGE.
|
||||||
|
* [CHANGE] LabelValuesToSignature removed. (Nobody had used it, and it was
|
||||||
|
arguably broken.)
|
||||||
|
* [CHANGE] Vendored dependencies. Those are only used by the Makefile. If
|
||||||
|
client_golang is used as a library, the vendoring will stay out of your way.
|
||||||
|
* [BUGFIX] Remove a weakness in the fingerprinting for metrics. (This made
|
||||||
|
the fingerprinting change above necessary.)
|
||||||
|
* [FEATURE] Added new fingerprinting functions SignatureForLabels and
|
||||||
|
SignatureWithoutLabels to be used by the Prometheus server. These functions
|
||||||
|
require fewer allocations than the ones currently used by the server.
|
||||||
|
|
||||||
|
## 0.2.0 / 2015-02-23
|
||||||
|
* [FEATURE] Introduce new Histogram metric type.
|
||||||
|
* [CHANGE] Ignore process collector errors for now (better error handling
|
||||||
|
pending).
|
||||||
|
* [CHANGE] Use clear error interface for process pidFn.
|
||||||
|
* [BUGFIX] Fix Go download links for several archs and OSes.
|
||||||
|
* [ENHANCEMENT] Massively improve Gauge and Counter performance.
|
||||||
|
* [ENHANCEMENT] Catch illegal label names for summaries in histograms.
|
||||||
|
* [ENHANCEMENT] Reduce allocations during fingerprinting.
|
||||||
|
* [ENHANCEMENT] Remove cgo dependency. procfs package will only be included if
|
||||||
|
both cgo is available and the build is for an OS with procfs.
|
||||||
|
* [CLEANUP] Clean up code style issues.
|
||||||
|
* [CLEANUP] Mark slow test as such and exclude them from travis.
|
||||||
|
* [CLEANUP] Update protobuf library package name.
|
||||||
|
* [CLEANUP] Updated vendoring of beorn7/perks.
|
||||||
|
|
||||||
|
## 0.1.0 / 2015-02-02
|
||||||
|
* [CLEANUP] Introduced semantic versioning and changelog. From now on,
|
||||||
|
changes will be reported in this file.
|
18 vendor/src/github.com/prometheus/client_golang/CONTRIBUTING.md vendored Normal file
@@ -0,0 +1,18 @@
|
||||||
|
# Contributing
|
||||||
|
|
||||||
|
Prometheus uses GitHub to manage reviews of pull requests.
|
||||||
|
|
||||||
|
* If you have a trivial fix or improvement, go ahead and create a pull
|
||||||
|
request, addressing (with `@...`) one or more of the maintainers
|
||||||
|
(see [AUTHORS.md](AUTHORS.md)) in the description of the pull request.
|
||||||
|
|
||||||
|
* If you plan to do something more involved, first discuss your ideas
|
||||||
|
on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
|
||||||
|
This will avoid unnecessary work and surely give you and us a good deal
|
||||||
|
of inspiration.
|
||||||
|
|
||||||
|
* Relevant coding style guidelines are the [Go Code Review
|
||||||
|
Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
|
||||||
|
and the _Formatting and style_ section of Peter Bourgon's [Go: Best
|
||||||
|
Practices for Production
|
||||||
|
Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).
|
201 vendor/src/github.com/prometheus/client_golang/LICENSE vendored Normal file
@@ -0,0 +1,201 @@
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
23 vendor/src/github.com/prometheus/client_golang/NOTICE vendored Normal file
@@ -0,0 +1,23 @@
|
||||||
|
Prometheus instrumentation library for Go applications
|
||||||
|
Copyright 2012-2015 The Prometheus Authors
|
||||||
|
|
||||||
|
This product includes software developed at
|
||||||
|
SoundCloud Ltd. (http://soundcloud.com/).
|
||||||
|
|
||||||
|
|
||||||
|
The following components are included in this product:
|
||||||
|
|
||||||
|
perks - a fork of https://github.com/bmizerany/perks
|
||||||
|
https://github.com/beorn7/perks
|
||||||
|
Copyright 2013-2015 Blake Mizerany, Björn Rabenstein
|
||||||
|
See https://github.com/beorn7/perks/blob/master/README.md for license details.
|
||||||
|
|
||||||
|
Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
http://github.com/golang/protobuf/
|
||||||
|
Copyright 2010 The Go Authors
|
||||||
|
See source code for license details.
|
||||||
|
|
||||||
|
Support for streaming Protocol Buffer messages for the Go language (golang).
|
||||||
|
https://github.com/matttproud/golang_protobuf_extensions
|
||||||
|
Copyright 2013 Matt T. Proud
|
||||||
|
Licensed under the Apache License, Version 2.0
|
46 vendor/src/github.com/prometheus/client_golang/README.md vendored Normal file
@@ -0,0 +1,46 @@
# Prometheus Go client library

[![Build Status](https://travis-ci.org/prometheus/client_golang.svg?branch=master)](https://travis-ci.org/prometheus/client_golang)
[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/client_golang)](https://goreportcard.com/report/github.com/prometheus/client_golang)

This is the [Go](http://golang.org) client library for
[Prometheus](http://prometheus.io). It has two separate parts, one for
instrumenting application code, and one for creating clients that talk to the
Prometheus HTTP API.

## Instrumenting applications

[![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/prometheus)](http://gocover.io/github.com/prometheus/client_golang/prometheus) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus)

The
[`prometheus` directory](https://github.com/prometheus/client_golang/tree/master/prometheus)
contains the instrumentation library. See the
[best practices section](http://prometheus.io/docs/practices/naming/) of the
Prometheus documentation to learn more about instrumenting applications.

The
[`examples` directory](https://github.com/prometheus/client_golang/tree/master/examples)
contains simple examples of instrumented code.
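For orientation, a minimal editor's sketch of an instrumented service against this vendored copy (client_golang 0.8.0, which ships the `promhttp` exposition package) might look as follows; the metric name, help text, and port are arbitrary examples, not taken from this repository:

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// requestsTotal is an example counter; name and help text are arbitrary.
var requestsTotal = prometheus.NewCounter(prometheus.CounterOpts{
	Name: "myapp_requests_total",
	Help: "Total number of handled requests.",
})

func main() {
	prometheus.MustRegister(requestsTotal)

	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		requestsTotal.Inc()
		fmt.Fprintln(w, "ok")
	})
	// promhttp.Handler exposes the default registry in the text exposition format.
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```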
## Client for the Prometheus HTTP API

[![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/api/prometheus)](http://gocover.io/github.com/prometheus/client_golang/api/prometheus) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/api/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/api/prometheus)

The
[`api/prometheus` directory](https://github.com/prometheus/client_golang/tree/master/api/prometheus)
contains the client for the
[Prometheus HTTP API](http://prometheus.io/docs/querying/api/). It allows you
to write Go applications that query time series data from a Prometheus server.
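A hedged usage sketch against the `api/prometheus` package vendored later in this commit (`New`, `NewQueryAPI`, and `Query` as defined in api.go); the server address and the `up` expression are placeholders:

```go
package main

import (
	"fmt"
	"log"
	"time"

	prom "github.com/prometheus/client_golang/api/prometheus"
	"golang.org/x/net/context"
)

func main() {
	// Address is a placeholder; point it at a real Prometheus server.
	client, err := prom.New(prom.Config{Address: "http://localhost:9090"})
	if err != nil {
		log.Fatal(err)
	}

	qapi := prom.NewQueryAPI(client)

	// Instant query at the current time; "up" is an example expression.
	val, err := qapi.Query(context.Background(), "up", time.Now())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(val)
}
```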
## Where are `model`, `extraction`, and `text`?

The `model` package has been moved to
[`prometheus/common/model`](https://github.com/prometheus/common/tree/master/model).

The `extraction` and `text` packages are now contained in
[`prometheus/common/expfmt`](https://github.com/prometheus/common/tree/master/expfmt).

## Contributing and community

See the [contributing guidelines](CONTRIBUTING.md) and the
[Community section](http://prometheus.io/community/) of the homepage.
1 vendor/src/github.com/prometheus/client_golang/VERSION vendored Normal file
@@ -0,0 +1 @@
0.8.0
348 vendor/src/github.com/prometheus/client_golang/api/prometheus/api.go vendored Normal file
@@ -0,0 +1,348 @@
|
||||||
|
// Copyright 2015 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package prometheus provides bindings to the Prometheus HTTP API:
|
||||||
|
// http://prometheus.io/docs/querying/api/
|
||||||
|
package prometheus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"path"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/prometheus/common/model"
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
"golang.org/x/net/context/ctxhttp"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
statusAPIError = 422
|
||||||
|
apiPrefix = "/api/v1"
|
||||||
|
|
||||||
|
epQuery = "/query"
|
||||||
|
epQueryRange = "/query_range"
|
||||||
|
epLabelValues = "/label/:name/values"
|
||||||
|
epSeries = "/series"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ErrorType models the different API error types.
|
||||||
|
type ErrorType string
|
||||||
|
|
||||||
|
// Possible values for ErrorType.
|
||||||
|
const (
|
||||||
|
ErrBadData ErrorType = "bad_data"
|
||||||
|
ErrTimeout = "timeout"
|
||||||
|
ErrCanceled = "canceled"
|
||||||
|
ErrExec = "execution"
|
||||||
|
ErrBadResponse = "bad_response"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Error is an error returned by the API.
|
||||||
|
type Error struct {
|
||||||
|
Type ErrorType
|
||||||
|
Msg string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *Error) Error() string {
|
||||||
|
return fmt.Sprintf("%s: %s", e.Type, e.Msg)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CancelableTransport is like net/http.Transport but provides
|
||||||
|
// per-request cancelation functionality.
|
||||||
|
type CancelableTransport interface {
|
||||||
|
http.RoundTripper
|
||||||
|
CancelRequest(req *http.Request)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultTransport is used if no Transport is set in Config.
|
||||||
|
var DefaultTransport CancelableTransport = &http.Transport{
|
||||||
|
Proxy: http.ProxyFromEnvironment,
|
||||||
|
Dial: (&net.Dialer{
|
||||||
|
Timeout: 30 * time.Second,
|
||||||
|
KeepAlive: 30 * time.Second,
|
||||||
|
}).Dial,
|
||||||
|
TLSHandshakeTimeout: 10 * time.Second,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Config defines configuration parameters for a new client.
|
||||||
|
type Config struct {
|
||||||
|
// The address of the Prometheus to connect to.
|
||||||
|
Address string
|
||||||
|
|
||||||
|
// Transport is used by the Client to drive HTTP requests. If not
|
||||||
|
// provided, DefaultTransport will be used.
|
||||||
|
Transport CancelableTransport
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cfg *Config) transport() CancelableTransport {
|
||||||
|
if cfg.Transport == nil {
|
||||||
|
return DefaultTransport
|
||||||
|
}
|
||||||
|
return cfg.Transport
|
||||||
|
}
|
||||||
|
|
||||||
|
// Client is the interface for an API client.
|
||||||
|
type Client interface {
|
||||||
|
url(ep string, args map[string]string) *url.URL
|
||||||
|
do(context.Context, *http.Request) (*http.Response, []byte, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// New returns a new Client.
|
||||||
|
//
|
||||||
|
// It is safe to use the returned Client from multiple goroutines.
|
||||||
|
func New(cfg Config) (Client, error) {
|
||||||
|
u, err := url.Parse(cfg.Address)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
u.Path = strings.TrimRight(u.Path, "/") + apiPrefix
|
||||||
|
|
||||||
|
return &httpClient{
|
||||||
|
endpoint: u,
|
||||||
|
transport: cfg.transport(),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type httpClient struct {
|
||||||
|
endpoint *url.URL
|
||||||
|
transport CancelableTransport
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *httpClient) url(ep string, args map[string]string) *url.URL {
|
||||||
|
p := path.Join(c.endpoint.Path, ep)
|
||||||
|
|
||||||
|
for arg, val := range args {
|
||||||
|
arg = ":" + arg
|
||||||
|
p = strings.Replace(p, arg, val, -1)
|
||||||
|
}
|
||||||
|
|
||||||
|
u := *c.endpoint
|
||||||
|
u.Path = p
|
||||||
|
|
||||||
|
return &u
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *httpClient) do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
|
||||||
|
resp, err := ctxhttp.Do(ctx, &http.Client{Transport: c.transport}, req)
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
if resp != nil {
|
||||||
|
resp.Body.Close()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var body []byte
|
||||||
|
done := make(chan struct{})
|
||||||
|
go func() {
|
||||||
|
body, err = ioutil.ReadAll(resp.Body)
|
||||||
|
close(done)
|
||||||
|
}()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
err = resp.Body.Close()
|
||||||
|
<-done
|
||||||
|
if err == nil {
|
||||||
|
err = ctx.Err()
|
||||||
|
}
|
||||||
|
case <-done:
|
||||||
|
}
|
||||||
|
|
||||||
|
return resp, body, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// apiClient wraps a regular client and processes successful API responses.
|
||||||
|
// Successful also includes responses that errored at the API level.
|
||||||
|
type apiClient struct {
|
||||||
|
Client
|
||||||
|
}
|
||||||
|
|
||||||
|
type apiResponse struct {
|
||||||
|
Status string `json:"status"`
|
||||||
|
Data json.RawMessage `json:"data"`
|
||||||
|
ErrorType ErrorType `json:"errorType"`
|
||||||
|
Error string `json:"error"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c apiClient) do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
|
||||||
|
resp, body, err := c.Client.do(ctx, req)
|
||||||
|
if err != nil {
|
||||||
|
return resp, body, err
|
||||||
|
}
|
||||||
|
|
||||||
|
code := resp.StatusCode
|
||||||
|
|
||||||
|
if code/100 != 2 && code != statusAPIError {
|
||||||
|
return resp, body, &Error{
|
||||||
|
Type: ErrBadResponse,
|
||||||
|
Msg: fmt.Sprintf("bad response code %d", resp.StatusCode),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var result apiResponse
|
||||||
|
|
||||||
|
if err = json.Unmarshal(body, &result); err != nil {
|
||||||
|
return resp, body, &Error{
|
||||||
|
Type: ErrBadResponse,
|
||||||
|
Msg: err.Error(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (code == statusAPIError) != (result.Status == "error") {
|
||||||
|
err = &Error{
|
||||||
|
Type: ErrBadResponse,
|
||||||
|
Msg: "inconsistent body for response code",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if code == statusAPIError && result.Status == "error" {
|
||||||
|
err = &Error{
|
||||||
|
Type: result.ErrorType,
|
||||||
|
Msg: result.Error,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return resp, []byte(result.Data), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Range represents a sliced time range.
|
||||||
|
type Range struct {
|
||||||
|
// The boundaries of the time range.
|
||||||
|
Start, End time.Time
|
||||||
|
// The maximum time between two slices within the boundaries.
|
||||||
|
Step time.Duration
|
||||||
|
}
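// Editor's note (not part of the vendored file): a hedged sketch of using the
// Range type above with QueryAPI.QueryRange, assuming qapi was built via New
// and NewQueryAPI and ctx is an existing context.Context; the PromQL
// expression is an arbitrary example.
//
//	r := prometheus.Range{
//		Start: time.Now().Add(-time.Hour),
//		End:   time.Now(),
//		Step:  time.Minute,
//	}
//	val, err := qapi.QueryRange(ctx, "rate(http_requests_total[5m])", r)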
|
||||||
|
|
||||||
|
// queryResult contains result data for a query.
|
||||||
|
type queryResult struct {
|
||||||
|
Type model.ValueType `json:"resultType"`
|
||||||
|
Result interface{} `json:"result"`
|
||||||
|
|
||||||
|
// The decoded value.
|
||||||
|
v model.Value
|
||||||
|
}
|
||||||
|
|
||||||
|
func (qr *queryResult) UnmarshalJSON(b []byte) error {
|
||||||
|
v := struct {
|
||||||
|
Type model.ValueType `json:"resultType"`
|
||||||
|
Result json.RawMessage `json:"result"`
|
||||||
|
}{}
|
||||||
|
|
||||||
|
err := json.Unmarshal(b, &v)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
switch v.Type {
|
||||||
|
case model.ValScalar:
|
||||||
|
var sv model.Scalar
|
||||||
|
err = json.Unmarshal(v.Result, &sv)
|
||||||
|
qr.v = &sv
|
||||||
|
|
||||||
|
case model.ValVector:
|
||||||
|
var vv model.Vector
|
||||||
|
err = json.Unmarshal(v.Result, &vv)
|
||||||
|
qr.v = vv
|
||||||
|
|
||||||
|
case model.ValMatrix:
|
||||||
|
var mv model.Matrix
|
||||||
|
err = json.Unmarshal(v.Result, &mv)
|
||||||
|
qr.v = mv
|
||||||
|
|
||||||
|
default:
|
||||||
|
err = fmt.Errorf("unexpected value type %q", v.Type)
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryAPI provides bindings to the Prometheus query API.
|
||||||
|
type QueryAPI interface {
|
||||||
|
// Query performs a query for the given time.
|
||||||
|
Query(ctx context.Context, query string, ts time.Time) (model.Value, error)
|
||||||
|
// QueryRange performs a query for the given range.
|
||||||
|
QueryRange(ctx context.Context, query string, r Range) (model.Value, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewQueryAPI returns a new QueryAPI for the client.
|
||||||
|
//
|
||||||
|
// It is safe to use the returned QueryAPI from multiple goroutines.
|
||||||
|
func NewQueryAPI(c Client) QueryAPI {
|
||||||
|
return &httpQueryAPI{client: apiClient{c}}
|
||||||
|
}
|
||||||
|
|
||||||
|
type httpQueryAPI struct {
|
||||||
|
client Client
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *httpQueryAPI) Query(ctx context.Context, query string, ts time.Time) (model.Value, error) {
|
||||||
|
u := h.client.url(epQuery, nil)
|
||||||
|
q := u.Query()
|
||||||
|
|
||||||
|
q.Set("query", query)
|
||||||
|
q.Set("time", ts.Format(time.RFC3339Nano))
|
||||||
|
|
||||||
|
u.RawQuery = q.Encode()
|
||||||
|
|
||||||
|
req, _ := http.NewRequest("GET", u.String(), nil)
|
||||||
|
|
||||||
|
_, body, err := h.client.do(ctx, req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var qres queryResult
|
||||||
|
err = json.Unmarshal(body, &qres)
|
||||||
|
|
||||||
|
return model.Value(qres.v), err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *httpQueryAPI) QueryRange(ctx context.Context, query string, r Range) (model.Value, error) {
|
||||||
|
u := h.client.url(epQueryRange, nil)
|
||||||
|
q := u.Query()
|
||||||
|
|
||||||
|
var (
|
||||||
|
start = r.Start.Format(time.RFC3339Nano)
|
||||||
|
end = r.End.Format(time.RFC3339Nano)
|
||||||
|
step = strconv.FormatFloat(r.Step.Seconds(), 'f', 3, 64)
|
||||||
|
)
|
||||||
|
|
||||||
|
q.Set("query", query)
|
||||||
|
q.Set("start", start)
|
||||||
|
q.Set("end", end)
|
||||||
|
q.Set("step", step)
|
||||||
|
|
||||||
|
u.RawQuery = q.Encode()
|
||||||
|
|
||||||
|
req, _ := http.NewRequest("GET", u.String(), nil)
|
||||||
|
|
||||||
|
_, body, err := h.client.do(ctx, req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var qres queryResult
|
||||||
|
err = json.Unmarshal(body, &qres)
|
||||||
|
|
||||||
|
return model.Value(qres.v), err
|
||||||
|
}
|
453 vendor/src/github.com/prometheus/client_golang/api/prometheus/api_test.go vendored Normal file
@@ -0,0 +1,453 @@
|
||||||
|
// Copyright 2015 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package prometheus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/prometheus/common/model"
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestConfig(t *testing.T) {
|
||||||
|
c := Config{}
|
||||||
|
if c.transport() != DefaultTransport {
|
||||||
|
t.Fatalf("expected default transport for nil Transport field")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestClientURL(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
address string
|
||||||
|
endpoint string
|
||||||
|
args map[string]string
|
||||||
|
expected string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
address: "http://localhost:9090",
|
||||||
|
endpoint: "/test",
|
||||||
|
expected: "http://localhost:9090/test",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
address: "http://localhost",
|
||||||
|
endpoint: "/test",
|
||||||
|
expected: "http://localhost/test",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
address: "http://localhost:9090",
|
||||||
|
endpoint: "test",
|
||||||
|
expected: "http://localhost:9090/test",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
address: "http://localhost:9090/prefix",
|
||||||
|
endpoint: "/test",
|
||||||
|
expected: "http://localhost:9090/prefix/test",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
address: "https://localhost:9090/",
|
||||||
|
endpoint: "/test/",
|
||||||
|
expected: "https://localhost:9090/test",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
address: "http://localhost:9090",
|
||||||
|
endpoint: "/test/:param",
|
||||||
|
args: map[string]string{
|
||||||
|
"param": "content",
|
||||||
|
},
|
||||||
|
expected: "http://localhost:9090/test/content",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
address: "http://localhost:9090",
|
||||||
|
endpoint: "/test/:param/more/:param",
|
||||||
|
args: map[string]string{
|
||||||
|
"param": "content",
|
||||||
|
},
|
||||||
|
expected: "http://localhost:9090/test/content/more/content",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
address: "http://localhost:9090",
|
||||||
|
endpoint: "/test/:param/more/:foo",
|
||||||
|
args: map[string]string{
|
||||||
|
"param": "content",
|
||||||
|
"foo": "bar",
|
||||||
|
},
|
||||||
|
expected: "http://localhost:9090/test/content/more/bar",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
address: "http://localhost:9090",
|
||||||
|
endpoint: "/test/:param",
|
||||||
|
args: map[string]string{
|
||||||
|
"nonexistant": "content",
|
||||||
|
},
|
||||||
|
expected: "http://localhost:9090/test/:param",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
ep, err := url.Parse(test.address)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
hclient := &httpClient{
|
||||||
|
endpoint: ep,
|
||||||
|
transport: DefaultTransport,
|
||||||
|
}
|
||||||
|
|
||||||
|
u := hclient.url(test.endpoint, test.args)
|
||||||
|
if u.String() != test.expected {
|
||||||
|
t.Errorf("unexpected result: got %s, want %s", u, test.expected)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// The apiClient must return exactly the same result as the httpClient.
|
||||||
|
aclient := &apiClient{hclient}
|
||||||
|
|
||||||
|
u = aclient.url(test.endpoint, test.args)
|
||||||
|
if u.String() != test.expected {
|
||||||
|
t.Errorf("unexpected result: got %s, want %s", u, test.expected)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type testClient struct {
|
||||||
|
*testing.T
|
||||||
|
|
||||||
|
ch chan apiClientTest
|
||||||
|
req *http.Request
|
||||||
|
}
|
||||||
|
|
||||||
|
type apiClientTest struct {
|
||||||
|
code int
|
||||||
|
response interface{}
|
||||||
|
expected string
|
||||||
|
err *Error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *testClient) url(ep string, args map[string]string) *url.URL {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *testClient) do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
|
||||||
|
if ctx == nil {
|
||||||
|
c.Fatalf("context was not passed down")
|
||||||
|
}
|
||||||
|
if req != c.req {
|
||||||
|
c.Fatalf("request was not passed down")
|
||||||
|
}
|
||||||
|
|
||||||
|
test := <-c.ch
|
||||||
|
|
||||||
|
var b []byte
|
||||||
|
var err error
|
||||||
|
|
||||||
|
switch v := test.response.(type) {
|
||||||
|
case string:
|
||||||
|
b = []byte(v)
|
||||||
|
default:
|
||||||
|
b, err = json.Marshal(v)
|
||||||
|
if err != nil {
|
||||||
|
c.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resp := &http.Response{
|
||||||
|
StatusCode: test.code,
|
||||||
|
}
|
||||||
|
|
||||||
|
return resp, b, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAPIClientDo(t *testing.T) {
|
||||||
|
tests := []apiClientTest{
|
||||||
|
{
|
||||||
|
response: &apiResponse{
|
||||||
|
Status: "error",
|
||||||
|
Data: json.RawMessage(`null`),
|
||||||
|
ErrorType: ErrBadData,
|
||||||
|
Error: "failed",
|
||||||
|
},
|
||||||
|
err: &Error{
|
||||||
|
Type: ErrBadData,
|
||||||
|
Msg: "failed",
|
||||||
|
},
|
||||||
|
code: statusAPIError,
|
||||||
|
expected: `null`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
response: &apiResponse{
|
||||||
|
Status: "error",
|
||||||
|
Data: json.RawMessage(`"test"`),
|
||||||
|
ErrorType: ErrTimeout,
|
||||||
|
Error: "timed out",
|
||||||
|
},
|
||||||
|
err: &Error{
|
||||||
|
Type: ErrTimeout,
|
||||||
|
Msg: "timed out",
|
||||||
|
},
|
||||||
|
code: statusAPIError,
|
||||||
|
expected: `test`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
response: "bad json",
|
||||||
|
err: &Error{
|
||||||
|
Type: ErrBadResponse,
|
||||||
|
Msg: "bad response code 400",
|
||||||
|
},
|
||||||
|
code: http.StatusBadRequest,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
response: "bad json",
|
||||||
|
err: &Error{
|
||||||
|
Type: ErrBadResponse,
|
||||||
|
Msg: "invalid character 'b' looking for beginning of value",
|
||||||
|
},
|
||||||
|
code: statusAPIError,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
response: &apiResponse{
|
||||||
|
Status: "success",
|
||||||
|
Data: json.RawMessage(`"test"`),
|
||||||
|
},
|
||||||
|
err: &Error{
|
||||||
|
Type: ErrBadResponse,
|
||||||
|
Msg: "inconsistent body for response code",
|
||||||
|
},
|
||||||
|
code: statusAPIError,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
response: &apiResponse{
|
||||||
|
Status: "success",
|
||||||
|
Data: json.RawMessage(`"test"`),
|
||||||
|
ErrorType: ErrTimeout,
|
||||||
|
Error: "timed out",
|
||||||
|
},
|
||||||
|
err: &Error{
|
||||||
|
Type: ErrBadResponse,
|
||||||
|
Msg: "inconsistent body for response code",
|
||||||
|
},
|
||||||
|
code: statusAPIError,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
response: &apiResponse{
|
||||||
|
Status: "error",
|
||||||
|
Data: json.RawMessage(`"test"`),
|
||||||
|
ErrorType: ErrTimeout,
|
||||||
|
Error: "timed out",
|
||||||
|
},
|
||||||
|
err: &Error{
|
||||||
|
Type: ErrBadResponse,
|
||||||
|
Msg: "inconsistent body for response code",
|
||||||
|
},
|
||||||
|
code: http.StatusOK,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
tc := &testClient{
|
||||||
|
T: t,
|
||||||
|
ch: make(chan apiClientTest, 1),
|
||||||
|
req: &http.Request{},
|
||||||
|
}
|
||||||
|
client := &apiClient{tc}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
|
||||||
|
tc.ch <- test
|
||||||
|
|
||||||
|
_, body, err := client.do(context.Background(), tc.req)
|
||||||
|
|
||||||
|
if test.err != nil {
|
||||||
|
if err == nil {
|
||||||
|
t.Errorf("expected error %q but got none", test.err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if test.err.Error() != err.Error() {
|
||||||
|
t.Errorf("unexpected error: want %q, got %q", test.err, err)
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("unexpeceted error %s", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
want, got := test.expected, string(body)
|
||||||
|
if want != got {
|
||||||
|
t.Errorf("unexpected body: want %q, got %q", want, got)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type apiTestClient struct {
	*testing.T
	curTest apiTest
}

type apiTest struct {
	do    func() (interface{}, error)
	inErr error
	inRes interface{}

	reqPath   string
	reqParam  url.Values
	reqMethod string
	res       interface{}
	err       error
}

func (c *apiTestClient) url(ep string, args map[string]string) *url.URL {
	u := &url.URL{
		Host: "test:9090",
		Path: apiPrefix + ep,
	}
	return u
}

func (c *apiTestClient) do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {

	test := c.curTest

	if req.URL.Path != test.reqPath {
		c.Errorf("unexpected request path: want %s, got %s", test.reqPath, req.URL.Path)
	}
	if req.Method != test.reqMethod {
		c.Errorf("unexpected request method: want %s, got %s", test.reqMethod, req.Method)
	}

	b, err := json.Marshal(test.inRes)
	if err != nil {
		c.Fatal(err)
	}

	resp := &http.Response{}
	if test.inErr != nil {
		resp.StatusCode = statusAPIError
	} else {
		resp.StatusCode = http.StatusOK
	}

	return resp, b, test.inErr
}

func TestAPIs(t *testing.T) {

	testTime := time.Now()

	client := &apiTestClient{T: t}

	queryAPI := &httpQueryAPI{
		client: client,
	}

	doQuery := func(q string, ts time.Time) func() (interface{}, error) {
		return func() (interface{}, error) {
			return queryAPI.Query(context.Background(), q, ts)
		}
	}

	doQueryRange := func(q string, rng Range) func() (interface{}, error) {
		return func() (interface{}, error) {
			return queryAPI.QueryRange(context.Background(), q, rng)
		}
	}

	queryTests := []apiTest{
		{
			do: doQuery("2", testTime),
			inRes: &queryResult{
				Type: model.ValScalar,
				Result: &model.Scalar{
					Value:     2,
					Timestamp: model.TimeFromUnix(testTime.Unix()),
				},
			},

			reqMethod: "GET",
			reqPath:   "/api/v1/query",
			reqParam: url.Values{
				"query": []string{"2"},
				"time":  []string{testTime.Format(time.RFC3339Nano)},
			},
			res: &model.Scalar{
				Value:     2,
				Timestamp: model.TimeFromUnix(testTime.Unix()),
			},
		},
		{
			do:    doQuery("2", testTime),
			inErr: fmt.Errorf("some error"),

			reqMethod: "GET",
			reqPath:   "/api/v1/query",
			reqParam: url.Values{
				"query": []string{"2"},
				"time":  []string{testTime.Format(time.RFC3339Nano)},
			},
			err: fmt.Errorf("some error"),
		},

		{
			do: doQueryRange("2", Range{
				Start: testTime.Add(-time.Minute),
				End:   testTime,
				Step:  time.Minute,
			}),
			inErr: fmt.Errorf("some error"),

			reqMethod: "GET",
			reqPath:   "/api/v1/query_range",
			reqParam: url.Values{
				"query": []string{"2"},
				"start": []string{testTime.Add(-time.Minute).Format(time.RFC3339Nano)},
				"end":   []string{testTime.Format(time.RFC3339Nano)},
				"step":  []string{time.Minute.String()},
			},
			err: fmt.Errorf("some error"),
		},
	}

	var tests []apiTest
	tests = append(tests, queryTests...)

	for _, test := range tests {
		client.curTest = test

		res, err := test.do()

		if test.err != nil {
			if err == nil {
				t.Errorf("expected error %q but got none", test.err)
				continue
			}
			if err.Error() != test.err.Error() {
				t.Errorf("unexpected error: want %s, got %s", test.err, err)
			}
			continue
		}
		if err != nil {
			t.Errorf("unexpected error: %s", err)
			continue
		}

		if !reflect.DeepEqual(res, test.res) {
			t.Errorf("unexpected result: want %v, got %v", test.res, res)
		}
	}
}
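For orientation only: the httpQueryAPI exercised by TestAPIs above is normally reached through the package's exported constructors rather than by assembling the structs directly. The sketch below is a hedged illustration, not part of the vendored code; it assumes this vintage of the api/prometheus package exposes New(Config{Address: ...}) and NewQueryAPI with the Query/QueryRange signatures used in the tests, and that the standard library context type is accepted.

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/prometheus/client_golang/api/prometheus"
)

func main() {
	// Assumption: New and NewQueryAPI are the exported constructors of this
	// revision of the api/prometheus package; adjust to the vendored version.
	client, err := prometheus.New(prometheus.Config{Address: "http://localhost:9090"})
	if err != nil {
		log.Fatal(err)
	}
	queryAPI := prometheus.NewQueryAPI(client)

	// Instant query, mirroring the /api/v1/query requests driven by the tests.
	val, err := queryAPI.Query(context.Background(), "up", time.Now())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(val)

	// Range query over the last hour, using the same Range type as the tests.
	rng := prometheus.Range{
		Start: time.Now().Add(-time.Hour),
		End:   time.Now(),
		Step:  time.Minute,
	}
	val, err = queryAPI.QueryRange(context.Background(), "up", rng)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(val)
}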
106 vendor/src/github.com/prometheus/client_golang/examples/random/main.go vendored Normal file
@@ -0,0 +1,106 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// A simple example exposing fictional RPC latencies with different types of
// random distributions (uniform, normal, and exponential) as Prometheus
// metrics.
package main

import (
	"flag"
	"log"
	"math"
	"math/rand"
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var (
	addr              = flag.String("listen-address", ":8080", "The address to listen on for HTTP requests.")
	uniformDomain     = flag.Float64("uniform.domain", 0.0002, "The domain for the uniform distribution.")
	normDomain        = flag.Float64("normal.domain", 0.0002, "The domain for the normal distribution.")
	normMean          = flag.Float64("normal.mean", 0.00001, "The mean for the normal distribution.")
	oscillationPeriod = flag.Duration("oscillation-period", 10*time.Minute, "The duration of the rate oscillation period.")
)

var (
	// Create a summary to track fictional interservice RPC latencies for three
	// distinct services with different latency distributions. These services are
	// differentiated via a "service" label.
	rpcDurations = prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Name:       "rpc_durations_seconds",
			Help:       "RPC latency distributions.",
			Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
		},
		[]string{"service"},
	)
	// The same as above, but now as a histogram, and only for the normal
	// distribution. The buckets are targeted to the parameters of the
	// normal distribution, with 20 buckets centered on the mean, each
	// half-sigma wide.
	rpcDurationsHistogram = prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "rpc_durations_histogram_seconds",
		Help:    "RPC latency distributions.",
		Buckets: prometheus.LinearBuckets(*normMean-5**normDomain, .5**normDomain, 20),
	})
)

func init() {
	// Register the summary and the histogram with Prometheus's default registry.
	prometheus.MustRegister(rpcDurations)
	prometheus.MustRegister(rpcDurationsHistogram)
}

func main() {
	flag.Parse()

	start := time.Now()

	oscillationFactor := func() float64 {
		return 2 + math.Sin(math.Sin(2*math.Pi*float64(time.Since(start))/float64(*oscillationPeriod)))
	}

	// Periodically record some sample latencies for the three services.
	go func() {
		for {
			v := rand.Float64() * *uniformDomain
			rpcDurations.WithLabelValues("uniform").Observe(v)
			time.Sleep(time.Duration(100*oscillationFactor()) * time.Millisecond)
		}
	}()

	go func() {
		for {
			v := (rand.NormFloat64() * *normDomain) + *normMean
			rpcDurations.WithLabelValues("normal").Observe(v)
			rpcDurationsHistogram.Observe(v)
			time.Sleep(time.Duration(75*oscillationFactor()) * time.Millisecond)
		}
	}()

	go func() {
		for {
			v := rand.ExpFloat64() / 1e6
			rpcDurations.WithLabelValues("exponential").Observe(v)
			time.Sleep(time.Duration(50*oscillationFactor()) * time.Millisecond)
		}
	}()

	// Expose the registered metrics via HTTP.
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(*addr, nil))
}
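Once the random example above is running, the registered metrics are served in Prometheus's plain-text exposition format on the configured listen address. A minimal sketch using only the standard library, assuming the default :8080 address:

package main

import (
	"io"
	"log"
	"net/http"
	"os"
)

func main() {
	// Fetch the metrics endpoint exposed by the example (default listen address).
	resp, err := http.Get("http://localhost:8080/metrics")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// Dump the exposition-format text to stdout; rpc_durations_seconds and
	// rpc_durations_histogram_seconds should appear among the default metrics.
	if _, err := io.Copy(os.Stdout, resp.Body); err != nil {
		log.Fatal(err)
	}
}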
31 vendor/src/github.com/prometheus/client_golang/examples/simple/main.go vendored Normal file
@@ -0,0 +1,31 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// A minimal example of how to include Prometheus instrumentation.
package main

import (
	"flag"
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var addr = flag.String("listen-address", ":8080", "The address to listen on for HTTP requests.")

func main() {
	flag.Parse()
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(*addr, nil))
}
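The minimal example above serves only the default registry. As a hedged sketch of how an application metric might be layered on top of it (the counter name and the root handler are illustrative assumptions, not part of the vendored code):

package main

import (
	"flag"
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var addr = flag.String("listen-address", ":8080", "The address to listen on for HTTP requests.")

// requestsTotal is an illustrative application counter; the name and help text
// are assumptions for this sketch only.
var requestsTotal = prometheus.NewCounter(prometheus.CounterOpts{
	Name: "example_requests_total",
	Help: "Total number of handled example requests.",
})

func main() {
	flag.Parse()
	prometheus.MustRegister(requestsTotal)

	// Count every request to the root handler, then expose the default registry.
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		requestsTotal.Inc()
		w.Write([]byte("ok"))
	})
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(*addr, nil))
}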
1 vendor/src/github.com/prometheus/client_golang/prometheus/README.md vendored Normal file
@@ -0,0 +1 @@
See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus).