aboutsummaryrefslogtreecommitdiff
path: root/vendor/github.com
diff options
context:
space:
mode:
Diffstat (limited to 'vendor/github.com')
-rw-r--r--vendor/github.com/Shopify/sarama/.travis.yml8
-rw-r--r--vendor/github.com/Shopify/sarama/CHANGELOG.md29
-rw-r--r--vendor/github.com/Shopify/sarama/README.md2
-rw-r--r--vendor/github.com/Shopify/sarama/client.go45
-rw-r--r--vendor/github.com/Shopify/sarama/client_test.go18
-rw-r--r--vendor/github.com/Shopify/sarama/config.go30
-rw-r--r--vendor/github.com/Shopify/sarama/consumer.go94
-rw-r--r--vendor/github.com/Shopify/sarama/consumer_test.go42
-rw-r--r--vendor/github.com/Shopify/sarama/crc32_field.go6
-rw-r--r--vendor/github.com/Shopify/sarama/fetch_request.go14
-rw-r--r--vendor/github.com/Shopify/sarama/message.go26
-rw-r--r--vendor/github.com/Shopify/sarama/message_test.go34
-rw-r--r--vendor/github.com/Shopify/sarama/mockresponses.go10
-rw-r--r--vendor/github.com/Shopify/sarama/offset_manager.go18
-rw-r--r--vendor/github.com/Shopify/sarama/offset_manager_test.go64
-rw-r--r--vendor/github.com/Shopify/sarama/utils.go5
-rw-r--r--vendor/github.com/coreos/etcd/.travis.yml2
-rw-r--r--vendor/github.com/coreos/etcd/Makefile120
-rw-r--r--vendor/github.com/coreos/etcd/client/client.go19
-rw-r--r--vendor/github.com/coreos/etcd/client/client_test.go33
-rw-r--r--vendor/github.com/coreos/etcd/glide.lock6
-rw-r--r--vendor/github.com/coreos/etcd/glide.yaml2
-rw-r--r--vendor/github.com/coreos/etcd/version/version.go2
-rw-r--r--vendor/github.com/emicklei/go-restful/.travis.yml6
-rw-r--r--vendor/github.com/emicklei/go-restful/CHANGES.md125
-rw-r--r--vendor/github.com/emicklei/go-restful/Makefile7
-rw-r--r--vendor/github.com/emicklei/go-restful/README.md29
-rw-r--r--vendor/github.com/emicklei/go-restful/compress.go13
-rw-r--r--vendor/github.com/emicklei/go-restful/compress_test.go2
-rw-r--r--vendor/github.com/emicklei/go-restful/compressors.go9
-rw-r--r--vendor/github.com/emicklei/go-restful/container.go126
-rw-r--r--vendor/github.com/emicklei/go-restful/container_test.go22
-rw-r--r--vendor/github.com/emicklei/go-restful/cors_filter.go66
-rw-r--r--vendor/github.com/emicklei/go-restful/cors_filter_test.go20
-rw-r--r--vendor/github.com/emicklei/go-restful/curly.go14
-rw-r--r--vendor/github.com/emicklei/go-restful/curly_route.go26
-rw-r--r--vendor/github.com/emicklei/go-restful/curly_test.go16
-rw-r--r--vendor/github.com/emicklei/go-restful/doc.go19
-rw-r--r--vendor/github.com/emicklei/go-restful/entity_accessors.go20
-rw-r--r--vendor/github.com/emicklei/go-restful/entity_accessors_test.go2
-rw-r--r--vendor/github.com/emicklei/go-restful/filter.go9
-rw-r--r--vendor/github.com/emicklei/go-restful/install.sh9
-rw-r--r--vendor/github.com/emicklei/go-restful/jsr311.go27
-rw-r--r--vendor/github.com/emicklei/go-restful/jsr311_test.go39
-rw-r--r--vendor/github.com/emicklei/go-restful/log/log.go5
-rw-r--r--vendor/github.com/emicklei/go-restful/logger.go2
-rw-r--r--vendor/github.com/emicklei/go-restful/mime.go45
-rw-r--r--vendor/github.com/emicklei/go-restful/mime_test.go17
-rw-r--r--vendor/github.com/emicklei/go-restful/options_filter.go10
-rw-r--r--vendor/github.com/emicklei/go-restful/request.go34
-rw-r--r--vendor/github.com/emicklei/go-restful/request_test.go63
-rw-r--r--vendor/github.com/emicklei/go-restful/response.go92
-rw-r--r--vendor/github.com/emicklei/go-restful/response_test.go45
-rw-r--r--vendor/github.com/emicklei/go-restful/route.go11
-rw-r--r--vendor/github.com/emicklei/go-restful/route_builder.go90
-rw-r--r--vendor/github.com/emicklei/go-restful/route_builder_test.go20
-rw-r--r--vendor/github.com/emicklei/go-restful/web_service.go65
-rw-r--r--vendor/github.com/emicklei/go-restful/web_service_test.go89
-rw-r--r--vendor/github.com/go-openapi/loads/spec.go2
-rw-r--r--vendor/github.com/go-openapi/loads/spec_test.go142
-rw-r--r--vendor/github.com/go-openapi/spec/expander.go24
-rw-r--r--vendor/github.com/go-openapi/spec/items.go11
-rw-r--r--vendor/github.com/go-openapi/spec/refmodifier.go82
-rw-r--r--vendor/github.com/go-openapi/spec/refmodifier_test.go335
-rw-r--r--vendor/github.com/go-openapi/spec/swagger.go2
-rw-r--r--vendor/github.com/gogo/protobuf/.travis.yml12
-rw-r--r--vendor/github.com/gogo/protobuf/AUTHORS1
-rw-r--r--vendor/github.com/gogo/protobuf/CONTRIBUTORS4
-rw-r--r--vendor/github.com/gogo/protobuf/Makefile13
-rw-r--r--vendor/github.com/gogo/protobuf/README3
-rw-r--r--vendor/github.com/gogo/protobuf/Readme.md17
-rw-r--r--vendor/github.com/gogo/protobuf/extensions.md5
-rwxr-xr-xvendor/github.com/gogo/protobuf/install-protobuf.sh15
-rw-r--r--vendor/github.com/gogo/protobuf/proto/decode_test.go2
-rw-r--r--vendor/github.com/gogo/protobuf/proto/encode.go4
-rw-r--r--vendor/github.com/gogo/protobuf/proto/encode_test.go2
-rw-r--r--vendor/github.com/gogo/protobuf/proto/lib.go1
-rw-r--r--vendor/github.com/gogo/protobuf/proto/properties.go3
-rw-r--r--vendor/github.com/gogo/protobuf/proto/text.go23
-rw-r--r--vendor/github.com/gogo/protobuf/proto/text_parser.go2
-rw-r--r--vendor/github.com/gogo/protobuf/proto/text_test.go8
-rw-r--r--vendor/github.com/golang/protobuf/.travis.yml1
-rw-r--r--vendor/github.com/golang/protobuf/README.md2
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/any/any.pb.go10
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/any/any.proto10
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go144
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/duration/duration.proto117
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go160
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto133
-rw-r--r--vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md46
-rw-r--r--vendor/github.com/imdario/mergo/README.md29
-rw-r--r--vendor/github.com/imdario/mergo/issue23_test.go27
-rw-r--r--vendor/github.com/imdario/mergo/issue38_test.go59
-rw-r--r--vendor/github.com/imdario/mergo/map.go24
-rw-r--r--vendor/github.com/imdario/mergo/merge.go52
-rw-r--r--vendor/github.com/imdario/mergo/mergo.go2
-rw-r--r--vendor/github.com/imdario/mergo/mergo_test.go143
-rw-r--r--vendor/github.com/juju/ratelimit/ratelimit.go190
-rw-r--r--vendor/github.com/juju/ratelimit/ratelimit_test.go9
-rw-r--r--vendor/github.com/mailru/easyjson/.travis.yml9
-rw-r--r--vendor/github.com/mailru/easyjson/Makefile4
-rw-r--r--vendor/github.com/mailru/easyjson/README.md2
-rw-r--r--vendor/github.com/mailru/easyjson/jlexer/lexer.go27
-rw-r--r--vendor/github.com/mailru/easyjson/jlexer/lexer_test.go60
-rw-r--r--vendor/github.com/mailru/easyjson/jwriter/writer.go7
-rw-r--r--vendor/github.com/mitchellh/mapstructure/mapstructure.go22
-rw-r--r--vendor/github.com/pierrec/lz4/.gitignore31
-rw-r--r--vendor/github.com/pierrec/lz4/.travis.yml7
-rw-r--r--vendor/github.com/pierrec/lz4/block.go41
-rw-r--r--vendor/github.com/pierrec/lz4/lz4.go13
-rw-r--r--vendor/github.com/pierrec/lz4/lz4_test.go256
-rw-r--r--vendor/github.com/pierrec/lz4/writer.go14
-rw-r--r--vendor/github.com/prometheus/procfs/proc_limits.go38
-rw-r--r--vendor/github.com/prometheus/procfs/proc_limits_test.go6
114 files changed, 3427 insertions, 804 deletions
diff --git a/vendor/github.com/Shopify/sarama/.travis.yml b/vendor/github.com/Shopify/sarama/.travis.yml
index 04d399ece..8e5a91ab0 100644
--- a/vendor/github.com/Shopify/sarama/.travis.yml
+++ b/vendor/github.com/Shopify/sarama/.travis.yml
@@ -1,7 +1,8 @@
language: go
go:
-- 1.7.3
-- 1.8
+- 1.7.x
+- 1.8.x
+- 1.9.x
env:
global:
@@ -12,7 +13,8 @@ env:
- DEBUG=true
matrix:
- KAFKA_VERSION=0.9.0.1
- - KAFKA_VERSION=0.10.2.0
+ - KAFKA_VERSION=0.10.2.1
+ - KAFKA_VERSION=0.11.0.1
before_install:
- export REPOSITORY_ROOT=${TRAVIS_BUILD_DIR}
diff --git a/vendor/github.com/Shopify/sarama/CHANGELOG.md b/vendor/github.com/Shopify/sarama/CHANGELOG.md
index 0a0082df7..5f65cb8c8 100644
--- a/vendor/github.com/Shopify/sarama/CHANGELOG.md
+++ b/vendor/github.com/Shopify/sarama/CHANGELOG.md
@@ -1,5 +1,34 @@
# Changelog
+#### Version 1.13.0 (2017-10-04)
+
+New Features:
+ - Support for FetchRequest version 3
+ ([#905](https://github.com/Shopify/sarama/pull/905)).
+ - Permit setting version on mock FetchResponses
+ ([#939](https://github.com/Shopify/sarama/pull/939)).
+ - Add a configuration option to support storing only minimal metadata for
+ extremely large clusters
+ ([#937](https://github.com/Shopify/sarama/pull/937)).
+ - Add `PartitionOffsetManager.ResetOffset` for backtracking tracked offsets
+ ([#932](https://github.com/Shopify/sarama/pull/932)).
+
+Improvements:
+ - Provide the block-level timestamp when consuming compressed messages
+ ([#885](https://github.com/Shopify/sarama/issues/885)).
+ - `Client.Replicas` and `Client.InSyncReplicas` now respect the order returned
+ by the broker, which can be meaningful
+ ([#930](https://github.com/Shopify/sarama/pull/930)).
+ - Use a `Ticker` to reduce consumer timer overhead at the cost of higher
+ variance in the actual timeout
+ ([#933](https://github.com/Shopify/sarama/pull/933)).
+
+Bug Fixes:
+ - Gracefully handle messages with negative timestamps
+ ([#907](https://github.com/Shopify/sarama/pull/907)).
+ - Raise a proper error when encountering an unknown message version
+ ([#940](https://github.com/Shopify/sarama/pull/940)).
+
#### Version 1.12.0 (2017-05-08)
New Features:
diff --git a/vendor/github.com/Shopify/sarama/README.md b/vendor/github.com/Shopify/sarama/README.md
index 6e12a07ae..47a9bda52 100644
--- a/vendor/github.com/Shopify/sarama/README.md
+++ b/vendor/github.com/Shopify/sarama/README.md
@@ -20,7 +20,7 @@ You might also want to look at the [Frequently Asked Questions](https://github.c
Sarama provides a "2 releases + 2 months" compatibility guarantee: we support
the two latest stable releases of Kafka and Go, and we provide a two month
grace period for older releases. This means we currently officially support
-Go 1.8 and 1.7, and Kafka 0.10 and 0.9, although older releases are
+Go 1.9 through 1.7, and Kafka 0.11 through 0.9, although older releases are
still likely to work.
Sarama follows semantic versioning and provides API stability via the gopkg.in service.
diff --git a/vendor/github.com/Shopify/sarama/client.go b/vendor/github.com/Shopify/sarama/client.go
index 45de3973d..570f7f3f3 100644
--- a/vendor/github.com/Shopify/sarama/client.go
+++ b/vendor/github.com/Shopify/sarama/client.go
@@ -141,18 +141,20 @@ func NewClient(addrs []string, conf *Config) (Client, error) {
client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index]))
}
- // do an initial fetch of all cluster metadata by specifying an empty list of topics
- err := client.RefreshMetadata()
- switch err {
- case nil:
- break
- case ErrLeaderNotAvailable, ErrReplicaNotAvailable, ErrTopicAuthorizationFailed, ErrClusterAuthorizationFailed:
- // indicates that maybe part of the cluster is down, but is not fatal to creating the client
- Logger.Println(err)
- default:
- close(client.closed) // we haven't started the background updater yet, so we have to do this manually
- _ = client.Close()
- return nil, err
+ if conf.Metadata.Full {
+ // do an initial fetch of all cluster metadata by specifying an empty list of topics
+ err := client.RefreshMetadata()
+ switch err {
+ case nil:
+ break
+ case ErrLeaderNotAvailable, ErrReplicaNotAvailable, ErrTopicAuthorizationFailed, ErrClusterAuthorizationFailed:
+ // indicates that maybe part of the cluster is down, but is not fatal to creating the client
+ Logger.Println(err)
+ default:
+ close(client.closed) // we haven't started the background updater yet, so we have to do this manually
+ _ = client.Close()
+ return nil, err
+ }
}
go withRecover(client.backgroundMetadataUpdater)
@@ -297,7 +299,7 @@ func (client *client) Replicas(topic string, partitionID int32) ([]int32, error)
if metadata.Err == ErrReplicaNotAvailable {
return nil, metadata.Err
}
- return dupeAndSort(metadata.Replicas), nil
+ return dupInt32Slice(metadata.Replicas), nil
}
func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, error) {
@@ -322,7 +324,7 @@ func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32,
if metadata.Err == ErrReplicaNotAvailable {
return nil, metadata.Err
}
- return dupeAndSort(metadata.Isr), nil
+ return dupInt32Slice(metadata.Isr), nil
}
func (client *client) Leader(topic string, partitionID int32) (*Broker, error) {
@@ -605,7 +607,20 @@ func (client *client) backgroundMetadataUpdater() {
for {
select {
case <-ticker.C:
- if err := client.RefreshMetadata(); err != nil {
+ topics := []string{}
+ if !client.conf.Metadata.Full {
+ if specificTopics, err := client.Topics(); err != nil {
+ Logger.Println("Client background metadata topic load:", err)
+ break
+ } else if len(specificTopics) == 0 {
+ Logger.Println("Client background metadata update: no specific topics to update")
+ break
+ } else {
+ topics = specificTopics
+ }
+ }
+
+ if err := client.RefreshMetadata(topics...); err != nil {
Logger.Println("Client background metadata update:", err)
}
case <-client.closer:
diff --git a/vendor/github.com/Shopify/sarama/client_test.go b/vendor/github.com/Shopify/sarama/client_test.go
index 0bac1b405..2e1198d27 100644
--- a/vendor/github.com/Shopify/sarama/client_test.go
+++ b/vendor/github.com/Shopify/sarama/client_test.go
@@ -188,12 +188,12 @@ func TestClientMetadata(t *testing.T) {
replicas, err = client.Replicas("my_topic", 0)
if err != nil {
t.Error(err)
- } else if replicas[0] != 1 {
- t.Error("Incorrect (or unsorted) replica")
- } else if replicas[1] != 3 {
- t.Error("Incorrect (or unsorted) replica")
+ } else if replicas[0] != 3 {
+ t.Error("Incorrect (or sorted) replica")
+ } else if replicas[1] != 1 {
+ t.Error("Incorrect (or sorted) replica")
} else if replicas[2] != 5 {
- t.Error("Incorrect (or unsorted) replica")
+ t.Error("Incorrect (or sorted) replica")
}
isr, err = client.InSyncReplicas("my_topic", 0)
@@ -201,10 +201,10 @@ func TestClientMetadata(t *testing.T) {
t.Error(err)
} else if len(isr) != 2 {
t.Error("Client returned incorrect ISRs for partition:", isr)
- } else if isr[0] != 1 {
- t.Error("Incorrect (or unsorted) ISR:", isr)
- } else if isr[1] != 5 {
- t.Error("Incorrect (or unsorted) ISR:", isr)
+ } else if isr[0] != 5 {
+ t.Error("Incorrect (or sorted) ISR:", isr)
+ } else if isr[1] != 1 {
+ t.Error("Incorrect (or sorted) ISR:", isr)
}
leader.Close()
diff --git a/vendor/github.com/Shopify/sarama/config.go b/vendor/github.com/Shopify/sarama/config.go
index 5021c57e9..e4ff680f2 100644
--- a/vendor/github.com/Shopify/sarama/config.go
+++ b/vendor/github.com/Shopify/sarama/config.go
@@ -72,6 +72,12 @@ type Config struct {
// Defaults to 10 minutes. Set to 0 to disable. Similar to
// `topic.metadata.refresh.interval.ms` in the JVM version.
RefreshFrequency time.Duration
+
+ // Whether to maintain a full set of metadata for all topics, or just
+ // the minimal set that has been necessary so far. The full set is simpler
+ // and usually more convenient, but can take up a substantial amount of
+ // memory if you have many topics and partitions. Defaults to true.
+ Full bool
}
// Producer is the namespace for configuration related to producing messages,
@@ -99,7 +105,10 @@ type Config struct {
Partitioner PartitionerConstructor
// Return specifies what channels will be populated. If they are set to true,
- // you must read from the respective channels to prevent deadlock.
+ // you must read from the respective channels to prevent deadlock. If,
+ // however, this config is used to create a `SyncProducer`, both must be set
+ // to true and you shall not read from the channels since the producer does
+ // this internally.
Return struct {
// If enabled, successfully delivered messages will be returned on the
// Successes channel (default disabled).
@@ -187,11 +196,23 @@ type Config struct {
// Equivalent to the JVM's `fetch.wait.max.ms`.
MaxWaitTime time.Duration
- // The maximum amount of time the consumer expects a message takes to process
- // for the user. If writing to the Messages channel takes longer than this,
- // that partition will stop fetching more messages until it can proceed again.
+ // The maximum amount of time the consumer expects a message takes to
+ // process for the user. If writing to the Messages channel takes longer
+ // than this, that partition will stop fetching more messages until it
+ // can proceed again.
// Note that, since the Messages channel is buffered, the actual grace time is
// (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms.
+ // If a message is not written to the Messages channel between two ticks
+ // of the expiryTicker then a timeout is detected.
+ // Using a ticker instead of a timer to detect timeouts should typically
+ // result in many fewer calls to Timer functions which may result in a
+ // significant performance improvement if many messages are being sent
+ // and timeouts are infrequent.
+ // The disadvantage of using a ticker instead of a timer is that
+ // timeouts will be less accurate. That is, the effective timeout could
+ // be between `MaxProcessingTime` and `2 * MaxProcessingTime`. For
+ // example, if `MaxProcessingTime` is 100ms then a delay of 180ms
+ // between two messages being sent may not be recognized as a timeout.
MaxProcessingTime time.Duration
// Return specifies what channels will be populated. If they are set to true,
@@ -260,6 +281,7 @@ func NewConfig() *Config {
c.Metadata.Retry.Max = 3
c.Metadata.Retry.Backoff = 250 * time.Millisecond
c.Metadata.RefreshFrequency = 10 * time.Minute
+ c.Metadata.Full = true
c.Producer.MaxMessageBytes = 1000000
c.Producer.RequiredAcks = WaitForLocal
diff --git a/vendor/github.com/Shopify/sarama/consumer.go b/vendor/github.com/Shopify/sarama/consumer.go
index 78d7fa2ca..2ce69b00b 100644
--- a/vendor/github.com/Shopify/sarama/consumer.go
+++ b/vendor/github.com/Shopify/sarama/consumer.go
@@ -10,11 +10,12 @@ import (
// ConsumerMessage encapsulates a Kafka message returned by the consumer.
type ConsumerMessage struct {
- Key, Value []byte
- Topic string
- Partition int32
- Offset int64
- Timestamp time.Time // only set if kafka is version 0.10+
+ Key, Value []byte
+ Topic string
+ Partition int32
+ Offset int64
+ Timestamp time.Time // only set if kafka is version 0.10+, inner message timestamp
+ BlockTimestamp time.Time // only set if kafka is version 0.10+, outer (compressed) block timestamp
}
// ConsumerError is what is provided to the user when an error occurs.
@@ -246,9 +247,9 @@ func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) {
// PartitionConsumer
-// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call Close()
-// or AsyncClose() on a PartitionConsumer to avoid leaks, it will not be garbage-collected automatically
-// when it passes out of scope.
+// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call one of Close() or
+// AsyncClose() on a PartitionConsumer to avoid leaks; it will not be garbage-collected automatically when it passes out
+// of scope.
//
// The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range
// loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported
@@ -257,19 +258,25 @@ func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) {
// By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set
// your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement
// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches.
+//
+// To terminate such a for/range loop while the loop is executing, call AsyncClose. This will kick off the process of
+// consumer tear-down & return immediately. Continue to loop, servicing the Messages channel until the teardown process
+// AsyncClose initiated closes it (thus terminating the for/range loop). If you've already ceased reading Messages, call
+// Close; this will signal the PartitionConsumer's goroutines to begin shutting down (just like AsyncClose), but will
+// also drain the Messages channel, harvest all errors & return them once cleanup has completed.
type PartitionConsumer interface {
- // AsyncClose initiates a shutdown of the PartitionConsumer. This method will
- // return immediately, after which you should wait until the 'messages' and
- // 'errors' channel are drained. It is required to call this function, or
- // Close before a consumer object passes out of scope, as it will otherwise
- // leak memory. You must call this before calling Close on the underlying client.
+ // AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately, after which you
+ // should continue to service the 'Messages' and 'Errors' channels until they are empty. It is required to call this
+ // function, or Close before a consumer object passes out of scope, as it will otherwise leak memory. You must call
+ // this before calling Close on the underlying client.
AsyncClose()
- // Close stops the PartitionConsumer from fetching messages. It is required to
- // call this function (or AsyncClose) before a consumer object passes out of
- // scope, as it will otherwise leak memory. You must call this before calling
- // Close on the underlying client.
+ // Close stops the PartitionConsumer from fetching messages. It will initiate a shutdown just like AsyncClose, drain
+ // the Messages channel, harvest any errors & return them to the caller. Note that if you are continuing to service
+ // the Messages channel when this function is called, you will be competing with Close for messages; consider
+ // calling AsyncClose, instead. It is required to call this function (or AsyncClose) before a consumer object passes
+ // out of scope, as it will otherwise leak memory. You must call this before calling Close on the underlying client.
Close() error
// Messages returns the read channel for the messages that are returned by
@@ -433,35 +440,37 @@ func (child *partitionConsumer) HighWaterMarkOffset() int64 {
func (child *partitionConsumer) responseFeeder() {
var msgs []*ConsumerMessage
- expiryTimer := time.NewTimer(child.conf.Consumer.MaxProcessingTime)
- expireTimedOut := false
+ msgSent := false
feederLoop:
for response := range child.feeder {
msgs, child.responseResult = child.parseResponse(response)
+ expiryTicker := time.NewTicker(child.conf.Consumer.MaxProcessingTime)
for i, msg := range msgs {
- if !expiryTimer.Stop() && !expireTimedOut {
- // expiryTimer was expired; clear out the waiting msg
- <-expiryTimer.C
- }
- expiryTimer.Reset(child.conf.Consumer.MaxProcessingTime)
- expireTimedOut = false
-
+ messageSelect:
select {
case child.messages <- msg:
- case <-expiryTimer.C:
- expireTimedOut = true
- child.responseResult = errTimedOut
- child.broker.acks.Done()
- for _, msg = range msgs[i:] {
- child.messages <- msg
+ msgSent = true
+ case <-expiryTicker.C:
+ if !msgSent {
+ child.responseResult = errTimedOut
+ child.broker.acks.Done()
+ for _, msg = range msgs[i:] {
+ child.messages <- msg
+ }
+ child.broker.input <- child
+ continue feederLoop
+ } else {
+ // current message has not been sent, return to select
+ // statement
+ msgSent = false
+ goto messageSelect
}
- child.broker.input <- child
- continue feederLoop
}
}
+ expiryTicker.Stop()
child.broker.acks.Done()
}
@@ -520,12 +529,13 @@ func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*Consu
if offset >= child.offset {
messages = append(messages, &ConsumerMessage{
- Topic: child.topic,
- Partition: child.partition,
- Key: msg.Msg.Key,
- Value: msg.Msg.Value,
- Offset: offset,
- Timestamp: msg.Msg.Timestamp,
+ Topic: child.topic,
+ Partition: child.partition,
+ Key: msg.Msg.Key,
+ Value: msg.Msg.Value,
+ Offset: offset,
+ Timestamp: msg.Msg.Timestamp,
+ BlockTimestamp: msgBlock.Msg.Timestamp,
})
child.offset = offset + 1
} else {
@@ -726,6 +736,10 @@ func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) {
if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) {
request.Version = 2
}
+ if bc.consumer.conf.Version.IsAtLeast(V0_10_1_0) {
+ request.Version = 3
+ request.MaxBytes = MaxResponseSize
+ }
for child := range bc.subscriptions {
request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize)
diff --git a/vendor/github.com/Shopify/sarama/consumer_test.go b/vendor/github.com/Shopify/sarama/consumer_test.go
index 387ede314..48f309b6d 100644
--- a/vendor/github.com/Shopify/sarama/consumer_test.go
+++ b/vendor/github.com/Shopify/sarama/consumer_test.go
@@ -803,6 +803,48 @@ func TestConsumerOffsetOutOfRange(t *testing.T) {
broker0.Close()
}
+func TestConsumerExpiryTicker(t *testing.T) {
+ // Given
+ broker0 := NewMockBroker(t, 0)
+ fetchResponse1 := &FetchResponse{}
+ for i := 1; i <= 8; i++ {
+ fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, int64(i))
+ }
+ broker0.SetHandlerByMap(map[string]MockResponse{
+ "MetadataRequest": NewMockMetadataResponse(t).
+ SetBroker(broker0.Addr(), broker0.BrokerID()).
+ SetLeader("my_topic", 0, broker0.BrokerID()),
+ "OffsetRequest": NewMockOffsetResponse(t).
+ SetOffset("my_topic", 0, OffsetNewest, 1234).
+ SetOffset("my_topic", 0, OffsetOldest, 1),
+ "FetchRequest": NewMockSequence(fetchResponse1),
+ })
+
+ config := NewConfig()
+ config.ChannelBufferSize = 0
+ config.Consumer.MaxProcessingTime = 10 * time.Millisecond
+ master, err := NewConsumer([]string{broker0.Addr()}, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // When
+ consumer, err := master.ConsumePartition("my_topic", 0, 1)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Then: messages with offsets 1 through 8 are read
+ for i := 1; i <= 8; i++ {
+ assertMessageOffset(t, <-consumer.Messages(), int64(i))
+ time.Sleep(2 * time.Millisecond)
+ }
+
+ safeClose(t, consumer)
+ safeClose(t, master)
+ broker0.Close()
+}
+
func assertMessageOffset(t *testing.T, msg *ConsumerMessage, expectedOffset int64) {
if msg.Offset != expectedOffset {
t.Errorf("Incorrect message offset: expected=%d, actual=%d", expectedOffset, msg.Offset)
diff --git a/vendor/github.com/Shopify/sarama/crc32_field.go b/vendor/github.com/Shopify/sarama/crc32_field.go
index f4fde18ad..e7da08c6f 100644
--- a/vendor/github.com/Shopify/sarama/crc32_field.go
+++ b/vendor/github.com/Shopify/sarama/crc32_field.go
@@ -2,6 +2,7 @@ package sarama
import (
"encoding/binary"
+ "fmt"
"hash/crc32"
)
@@ -27,8 +28,9 @@ func (c *crc32Field) run(curOffset int, buf []byte) error {
func (c *crc32Field) check(curOffset int, buf []byte) error {
crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset])
- if crc != binary.BigEndian.Uint32(buf[c.startOffset:]) {
- return PacketDecodingError{"CRC didn't match"}
+ expected := binary.BigEndian.Uint32(buf[c.startOffset:])
+ if crc != expected {
+ return PacketDecodingError{fmt.Sprintf("CRC didn't match expected %#x got %#x", expected, crc)}
}
return nil
diff --git a/vendor/github.com/Shopify/sarama/fetch_request.go b/vendor/github.com/Shopify/sarama/fetch_request.go
index ab817a06e..65600e86e 100644
--- a/vendor/github.com/Shopify/sarama/fetch_request.go
+++ b/vendor/github.com/Shopify/sarama/fetch_request.go
@@ -21,9 +21,13 @@ func (b *fetchRequestBlock) decode(pd packetDecoder) (err error) {
return nil
}
+// FetchRequest (API key 1) will fetch Kafka messages. Version 3 introduced the MaxBytes field. See
+// https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that. The KIP is at
+// https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes
type FetchRequest struct {
MaxWaitTime int32
MinBytes int32
+ MaxBytes int32
Version int16
blocks map[string]map[int32]*fetchRequestBlock
}
@@ -32,6 +36,9 @@ func (r *FetchRequest) encode(pe packetEncoder) (err error) {
pe.putInt32(-1) // replica ID is always -1 for clients
pe.putInt32(r.MaxWaitTime)
pe.putInt32(r.MinBytes)
+ if r.Version == 3 {
+ pe.putInt32(r.MaxBytes)
+ }
err = pe.putArrayLength(len(r.blocks))
if err != nil {
return err
@@ -67,6 +74,11 @@ func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) {
if r.MinBytes, err = pd.getInt32(); err != nil {
return err
}
+ if r.Version == 3 {
+ if r.MaxBytes, err = pd.getInt32(); err != nil {
+ return err
+ }
+ }
topicCount, err := pd.getArrayLength()
if err != nil {
return err
@@ -114,6 +126,8 @@ func (r *FetchRequest) requiredVersion() KafkaVersion {
return V0_9_0_0
case 2:
return V0_10_0_0
+ case 3:
+ return V0_10_1_0
default:
return minVersion
}
diff --git a/vendor/github.com/Shopify/sarama/message.go b/vendor/github.com/Shopify/sarama/message.go
index 327c5fa2a..06f175f67 100644
--- a/vendor/github.com/Shopify/sarama/message.go
+++ b/vendor/github.com/Shopify/sarama/message.go
@@ -45,7 +45,15 @@ func (m *Message) encode(pe packetEncoder) error {
pe.putInt8(attributes)
if m.Version >= 1 {
- pe.putInt64(m.Timestamp.UnixNano() / int64(time.Millisecond))
+ timestamp := int64(-1)
+
+ if !m.Timestamp.Before(time.Unix(0, 0)) {
+ timestamp = m.Timestamp.UnixNano() / int64(time.Millisecond)
+ } else if !m.Timestamp.IsZero() {
+ return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", m.Timestamp)}
+ }
+
+ pe.putInt64(timestamp)
}
err := pe.putBytes(m.Key)
@@ -114,18 +122,30 @@ func (m *Message) decode(pd packetDecoder) (err error) {
return err
}
+ if m.Version > 1 {
+ return PacketDecodingError{fmt.Sprintf("unknown magic byte (%v)", m.Version)}
+ }
+
attribute, err := pd.getInt8()
if err != nil {
return err
}
m.Codec = CompressionCodec(attribute & compressionCodecMask)
- if m.Version >= 1 {
+ if m.Version == 1 {
millis, err := pd.getInt64()
if err != nil {
return err
}
- m.Timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
+
+ // negative timestamps are invalid, in these cases we should return
+ // a zero time
+ timestamp := time.Time{}
+ if millis >= 0 {
+ timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
+ }
+
+ m.Timestamp = timestamp
}
m.Key, err = pd.getBytes()
diff --git a/vendor/github.com/Shopify/sarama/message_test.go b/vendor/github.com/Shopify/sarama/message_test.go
index d4a37c22d..630ad6e2a 100644
--- a/vendor/github.com/Shopify/sarama/message_test.go
+++ b/vendor/github.com/Shopify/sarama/message_test.go
@@ -2,6 +2,7 @@ package sarama
import (
"runtime"
+ "strings"
"testing"
"time"
)
@@ -14,6 +15,21 @@ var (
0xFF, 0xFF, 0xFF, 0xFF, // key
0xFF, 0xFF, 0xFF, 0xFF} // value
+ emptyV1Message = []byte{
+ 204, 47, 121, 217, // CRC
+ 0x01, // magic version byte
+ 0x00, // attribute flags
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // timestamp
+ 0xFF, 0xFF, 0xFF, 0xFF, // key
+ 0xFF, 0xFF, 0xFF, 0xFF} // value
+
+ emptyV2Message = []byte{
+ 167, 236, 104, 3, // CRC
+ 0x02, // magic version byte
+ 0x00, // attribute flags
+ 0xFF, 0xFF, 0xFF, 0xFF, // key
+ 0xFF, 0xFF, 0xFF, 0xFF} // value
+
emptyGzipMessage = []byte{
97, 79, 149, 90, //CRC
0x00, // magic version byte
@@ -91,7 +107,7 @@ func TestMessageEncoding(t *testing.T) {
message.Value = []byte{}
message.Codec = CompressionGZIP
- if runtime.Version() == "go1.8" {
+ if strings.HasPrefix(runtime.Version(), "go1.8") || strings.HasPrefix(runtime.Version(), "go1.9") {
testEncodable(t, "empty gzip", &message, emptyGzipMessage18)
} else {
testEncodable(t, "empty gzip", &message, emptyGzipMessage)
@@ -179,3 +195,19 @@ func TestMessageDecodingBulkLZ4(t *testing.T) {
t.Errorf("Decoding produced a set with %d messages, but 2 were expected.", len(message.Set.Messages))
}
}
+
+func TestMessageDecodingVersion1(t *testing.T) {
+ message := Message{Version: 1}
+ testDecodable(t, "decoding empty v1 message", &message, emptyV1Message)
+}
+
+func TestMessageDecodingUnknownVersions(t *testing.T) {
+ message := Message{Version: 2}
+ err := decode(emptyV2Message, &message)
+ if err == nil {
+ t.Error("Decoding did not produce an error for an unknown magic byte")
+ }
+ if err.Error() != "kafka: error decoding packet: unknown magic byte (2)" {
+ t.Error("Decoding an unknown magic byte produced an unknown error ", err)
+ }
+}
diff --git a/vendor/github.com/Shopify/sarama/mockresponses.go b/vendor/github.com/Shopify/sarama/mockresponses.go
index a20314209..d94bd24c6 100644
--- a/vendor/github.com/Shopify/sarama/mockresponses.go
+++ b/vendor/github.com/Shopify/sarama/mockresponses.go
@@ -180,6 +180,7 @@ type MockFetchResponse struct {
highWaterMarks map[string]map[int32]int64
t TestReporter
batchSize int
+ version int16
}
func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse {
@@ -191,6 +192,11 @@ func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse {
}
}
+func (mfr *MockFetchResponse) SetVersion(version int16) *MockFetchResponse {
+ mfr.version = version
+ return mfr
+}
+
func (mfr *MockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *MockFetchResponse {
partitions := mfr.messages[topic]
if partitions == nil {
@@ -218,7 +224,9 @@ func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, of
func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoder {
fetchRequest := reqBody.(*FetchRequest)
- res := &FetchResponse{}
+ res := &FetchResponse{
+ Version: mfr.version,
+ }
for topic, partitions := range fetchRequest.blocks {
for partition, block := range partitions {
initialOffset := block.fetchOffset
diff --git a/vendor/github.com/Shopify/sarama/offset_manager.go b/vendor/github.com/Shopify/sarama/offset_manager.go
index 5e15cdafe..6c01f959e 100644
--- a/vendor/github.com/Shopify/sarama/offset_manager.go
+++ b/vendor/github.com/Shopify/sarama/offset_manager.go
@@ -151,6 +151,13 @@ type PartitionOffsetManager interface {
// message twice, and your processing should ideally be idempotent.
MarkOffset(offset int64, metadata string)
+ // ResetOffset resets to the provided offset, alongside a metadata string that
+ // represents the state of the partition consumer at that point in time. Reset
+ // acts as a counterpart to MarkOffset, the difference being that it allows to
+ // reset an offset to an earlier or smaller value, where MarkOffset only
+ // allows incrementing the offset. cf MarkOffset for more details.
+ ResetOffset(offset int64, metadata string)
+
// Errors returns a read channel of errors that occur during offset management, if
// enabled. By default, errors are logged and not returned over this channel. If
// you want to implement any custom error handling, set your config's
@@ -329,6 +336,17 @@ func (pom *partitionOffsetManager) MarkOffset(offset int64, metadata string) {
}
}
+func (pom *partitionOffsetManager) ResetOffset(offset int64, metadata string) {
+ pom.lock.Lock()
+ defer pom.lock.Unlock()
+
+ if offset <= pom.offset {
+ pom.offset = offset
+ pom.metadata = metadata
+ pom.dirty = true
+ }
+}
+
func (pom *partitionOffsetManager) updateCommitted(offset int64, metadata string) {
pom.lock.Lock()
defer pom.lock.Unlock()
diff --git a/vendor/github.com/Shopify/sarama/offset_manager_test.go b/vendor/github.com/Shopify/sarama/offset_manager_test.go
index c111a5a63..21e4947c6 100644
--- a/vendor/github.com/Shopify/sarama/offset_manager_test.go
+++ b/vendor/github.com/Shopify/sarama/offset_manager_test.go
@@ -204,6 +204,70 @@ func TestPartitionOffsetManagerNextOffset(t *testing.T) {
safeClose(t, testClient)
}
+func TestPartitionOffsetManagerResetOffset(t *testing.T) {
+ om, testClient, broker, coordinator := initOffsetManager(t)
+ pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta")
+
+ ocResponse := new(OffsetCommitResponse)
+ ocResponse.AddError("my_topic", 0, ErrNoError)
+ coordinator.Returns(ocResponse)
+
+ expected := int64(1)
+ pom.ResetOffset(expected, "modified_meta")
+ actual, meta := pom.NextOffset()
+
+ if actual != expected {
+ t.Errorf("Expected offset %v. Actual: %v", expected, actual)
+ }
+ if meta != "modified_meta" {
+ t.Errorf("Expected metadata \"modified_meta\". Actual: %q", meta)
+ }
+
+ safeClose(t, pom)
+ safeClose(t, om)
+ safeClose(t, testClient)
+ broker.Close()
+ coordinator.Close()
+}
+
+func TestPartitionOffsetManagerResetOffsetWithRetention(t *testing.T) {
+ om, testClient, broker, coordinator := initOffsetManager(t)
+ testClient.Config().Consumer.Offsets.Retention = time.Hour
+
+ pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta")
+
+ ocResponse := new(OffsetCommitResponse)
+ ocResponse.AddError("my_topic", 0, ErrNoError)
+ handler := func(req *request) (res encoder) {
+ if req.body.version() != 2 {
+ t.Errorf("Expected to be using version 2. Actual: %v", req.body.version())
+ }
+ offsetCommitRequest := req.body.(*OffsetCommitRequest)
+ if offsetCommitRequest.RetentionTime != (60 * 60 * 1000) {
+ t.Errorf("Expected an hour retention time. Actual: %v", offsetCommitRequest.RetentionTime)
+ }
+ return ocResponse
+ }
+ coordinator.setHandler(handler)
+
+ expected := int64(1)
+ pom.ResetOffset(expected, "modified_meta")
+ actual, meta := pom.NextOffset()
+
+ if actual != expected {
+ t.Errorf("Expected offset %v. Actual: %v", expected, actual)
+ }
+ if meta != "modified_meta" {
+ t.Errorf("Expected metadata \"modified_meta\". Actual: %q", meta)
+ }
+
+ safeClose(t, pom)
+ safeClose(t, om)
+ safeClose(t, testClient)
+ broker.Close()
+ coordinator.Close()
+}
+
func TestPartitionOffsetManagerMarkOffset(t *testing.T) {
om, testClient, broker, coordinator := initOffsetManager(t)
pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta")
diff --git a/vendor/github.com/Shopify/sarama/utils.go b/vendor/github.com/Shopify/sarama/utils.go
index d36db9210..dc0e7e947 100644
--- a/vendor/github.com/Shopify/sarama/utils.go
+++ b/vendor/github.com/Shopify/sarama/utils.go
@@ -3,7 +3,6 @@ package sarama
import (
"bufio"
"net"
- "sort"
)
type none struct{}
@@ -23,13 +22,11 @@ func (slice int32Slice) Swap(i, j int) {
slice[i], slice[j] = slice[j], slice[i]
}
-func dupeAndSort(input []int32) []int32 {
+func dupInt32Slice(input []int32) []int32 {
ret := make([]int32, 0, len(input))
for _, val := range input {
ret = append(ret, val)
}
-
- sort.Sort(int32Slice(ret))
return ret
}
diff --git a/vendor/github.com/coreos/etcd/.travis.yml b/vendor/github.com/coreos/etcd/.travis.yml
index fb8e9b832..cdb30e67b 100644
--- a/vendor/github.com/coreos/etcd/.travis.yml
+++ b/vendor/github.com/coreos/etcd/.travis.yml
@@ -4,7 +4,7 @@ go_import_path: github.com/coreos/etcd
sudo: false
go:
- - 1.8.3
+ - 1.8.4
- tip
notifications:
diff --git a/vendor/github.com/coreos/etcd/Makefile b/vendor/github.com/coreos/etcd/Makefile
new file mode 100644
index 000000000..487346ae3
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/Makefile
@@ -0,0 +1,120 @@
+# run from repository root
+
+TEST_SUFFIX = $(shell date +%s | base64 | head -c 15)
+
+.PHONY: build
+build:
+ GO_BUILD_FLAGS="-v" ./build
+ ./bin/etcd --version
+ ETCDCTL_API=3 ./bin/etcdctl version
+
+test:
+ $(info log-file: test-$(TEST_SUFFIX).log)
+ PASSES='fmt bom dep compile build unit' ./test 2>&1 | tee test-$(TEST_SUFFIX).log
+ ! grep FAIL -A10 -B50 test-$(TEST_SUFFIX).log
+
+test-all:
+ $(info log-file: test-all-$(TEST_SUFFIX).log)
+ RELEASE_TEST=y INTEGRATION=y PASSES='build unit release integration_e2e functional' ./test 2>&1 | tee test-all-$(TEST_SUFFIX).log
+ ! grep FAIL -A10 -B50 test-all-$(TEST_SUFFIX).log
+
+test-proxy:
+ $(info log-file: test-proxy-$(TEST_SUFFIX).log)
+ PASSES='build grpcproxy' ./test 2>&1 | tee test-proxy-$(TEST_SUFFIX).log
+ ! grep FAIL -A10 -B50 test-proxy-$(TEST_SUFFIX).log
+
+test-coverage:
+ $(info log-file: test-coverage-$(TEST_SUFFIX).log)
+ COVERDIR=covdir PASSES='build build_cov cov' ./test 2>&1 | tee test-coverage-$(TEST_SUFFIX).log
+ $(shell curl -s https://codecov.io/bash >codecov)
+ chmod 700 ./codecov
+ ./codecov -h
+ ./codecov -t 6040de41-c073-4d6f-bbf8-d89256ef31e1
+
+# clean up failed tests, logs, dependencies
+clean:
+ rm -f ./codecov
+ rm -f ./*.log
+ rm -f ./bin/Dockerfile-release
+ rm -rf ./bin/*.etcd
+ rm -rf ./gopath
+ rm -rf ./release
+ rm -f ./integration/127.0.0.1:* ./integration/localhost:*
+ rm -f ./clientv3/integration/127.0.0.1:* ./clientv3/integration/localhost:*
+ rm -f ./clientv3/ordering/127.0.0.1:* ./clientv3/ordering/localhost:*
+
+# sync with Dockerfile-test, e2e/docker-dns/Dockerfile, e2e/docker-dns-srv/Dockerfile
+_GO_VERSION = go1.8.4
+ifdef GO_VERSION
+ _GO_VERSION = $(GO_VERSION)
+endif
+
+# build base container image for testing on Linux
+docker-test-build:
+ docker build --tag gcr.io/etcd-development/etcd-test:$(_GO_VERSION) --file ./Dockerfile-test .
+
+# e.g.
+# gcloud docker -- login -u _json_key -p "$(cat /etc/gcp-key-etcd.json)" https://gcr.io
+docker-test-push:
+ gcloud docker -- push gcr.io/etcd-development/etcd-test:$(_GO_VERSION)
+
+docker-test-pull:
+ docker pull gcr.io/etcd-development/etcd-test:$(_GO_VERSION)
+
+# compile etcd and etcdctl with Linux
+docker-test-compile:
+ docker run \
+ --rm \
+ --volume=`pwd`/:/etcd \
+ gcr.io/etcd-development/etcd-test:$(_GO_VERSION) \
+ /bin/bash -c "cd /etcd && GO_BUILD_FLAGS=-v ./build && ./bin/etcd --version"
+
+# run tests inside container
+docker-test:
+ $(info log-file: docker-test-$(TEST_SUFFIX).log)
+ docker run \
+ --rm \
+ --volume=`pwd`:/go/src/github.com/coreos/etcd \
+ gcr.io/etcd-development/etcd-test:$(_GO_VERSION) \
+ /bin/bash -c "RELEASE_TEST=y INTEGRATION=y PASSES='build unit release integration_e2e functional' ./test 2>&1 | tee docker-test-$(TEST_SUFFIX).log"
+ ! grep FAIL -A10 -B50 docker-test-$(TEST_SUFFIX).log
+
+docker-test-386:
+ $(info log-file: docker-test-386-$(TEST_SUFFIX).log)
+ docker run \
+ --rm \
+ --volume=`pwd`:/go/src/github.com/coreos/etcd \
+ gcr.io/etcd-development/etcd-test:$(_GO_VERSION) \
+ /bin/bash -c "GOARCH=386 PASSES='build unit integration_e2e' ./test 2>&1 | tee docker-test-386-$(TEST_SUFFIX).log"
+ ! grep FAIL -A10 -B50 docker-test-386-$(TEST_SUFFIX).log
+
+docker-test-proxy:
+ $(info log-file: docker-test-proxy-$(TEST_SUFFIX).log)
+ docker run \
+ --rm \
+ --volume=`pwd`:/go/src/github.com/coreos/etcd \
+ gcr.io/etcd-development/etcd-test:$(_GO_VERSION) \
+ /bin/bash -c "PASSES='build grpcproxy' ./test ./test 2>&1 | tee docker-test-proxy-$(TEST_SUFFIX).log"
+ ! grep FAIL -A10 -B50 docker-test-proxy-$(TEST_SUFFIX).log
+
+# build release container image with Linux
+_ETCD_VERSION ?= $(shell git rev-parse --short HEAD || echo "GitNotFound")
+ifdef ETCD_VERSION
+ _ETCD_VERSION = $(ETCD_VERSION)
+endif
+
+docker-release-master-build: docker-test-compile
+ cp ./Dockerfile-release ./bin/Dockerfile-release
+ docker build \
+ --tag gcr.io/etcd-development/etcd:$(_ETCD_VERSION) \
+ --file ./bin/Dockerfile-release \
+ ./bin
+ rm -f ./bin/Dockerfile-release
+
+ docker run \
+ --rm \
+ gcr.io/etcd-development/etcd:$(_ETCD_VERSION) \
+ /bin/sh -c "/usr/local/bin/etcd --version && ETCDCTL_API=3 /usr/local/bin/etcdctl version"
+
+docker-release-master-push:
+ gcloud docker -- push gcr.io/etcd-development/etcd:$(_ETCD_VERSION)
diff --git a/vendor/github.com/coreos/etcd/client/client.go b/vendor/github.com/coreos/etcd/client/client.go
index 498dfbcc8..19ce2ec01 100644
--- a/vendor/github.com/coreos/etcd/client/client.go
+++ b/vendor/github.com/coreos/etcd/client/client.go
@@ -372,12 +372,7 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo
if err == context.Canceled || err == context.DeadlineExceeded {
return nil, nil, err
}
- if isOneShot {
- return nil, nil, err
- }
- continue
- }
- if resp.StatusCode/100 == 5 {
+ } else if resp.StatusCode/100 == 5 {
switch resp.StatusCode {
case http.StatusInternalServerError, http.StatusServiceUnavailable:
// TODO: make sure this is a no leader response
@@ -385,10 +380,16 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo
default:
cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode)))
}
- if isOneShot {
- return nil, nil, cerr.Errors[0]
+ err = cerr.Errors[0]
+ }
+ if err != nil {
+ if !isOneShot {
+ continue
}
- continue
+ c.Lock()
+ c.pinned = (k + 1) % leps
+ c.Unlock()
+ return nil, nil, err
}
if k != pinned {
c.Lock()
diff --git a/vendor/github.com/coreos/etcd/client/client_test.go b/vendor/github.com/coreos/etcd/client/client_test.go
index 4ab54d883..71c1b1340 100644
--- a/vendor/github.com/coreos/etcd/client/client_test.go
+++ b/vendor/github.com/coreos/etcd/client/client_test.go
@@ -16,6 +16,7 @@ package client
import (
"errors"
+ "fmt"
"io"
"io/ioutil"
"math/rand"
@@ -304,7 +305,9 @@ func TestHTTPClusterClientDo(t *testing.T) {
fakeErr := errors.New("fake!")
fakeURL := url.URL{}
tests := []struct {
- client *httpClusterClient
+ client *httpClusterClient
+ ctx context.Context
+
wantCode int
wantErr error
wantPinned int
@@ -395,10 +398,30 @@ func TestHTTPClusterClientDo(t *testing.T) {
wantCode: http.StatusTeapot,
wantPinned: 1,
},
+
+ // 500-level errors cause one shot Do to fallthrough to next endpoint
+ {
+ client: &httpClusterClient{
+ endpoints: []url.URL{fakeURL, fakeURL},
+ clientFactory: newStaticHTTPClientFactory(
+ []staticHTTPResponse{
+ {resp: http.Response{StatusCode: http.StatusBadGateway}},
+ {resp: http.Response{StatusCode: http.StatusTeapot}},
+ },
+ ),
+ rand: rand.New(rand.NewSource(0)),
+ },
+ ctx: context.WithValue(context.Background(), &oneShotCtxValue, &oneShotCtxValue),
+ wantErr: fmt.Errorf("client: etcd member returns server error [Bad Gateway]"),
+ wantPinned: 1,
+ },
}
for i, tt := range tests {
- resp, _, err := tt.client.Do(context.Background(), nil)
+ if tt.ctx == nil {
+ tt.ctx = context.Background()
+ }
+ resp, _, err := tt.client.Do(tt.ctx, nil)
if !reflect.DeepEqual(tt.wantErr, err) {
t.Errorf("#%d: got err=%v, want=%v", i, err, tt.wantErr)
continue
@@ -407,11 +430,9 @@ func TestHTTPClusterClientDo(t *testing.T) {
if resp == nil {
if tt.wantCode != 0 {
t.Errorf("#%d: resp is nil, want=%d", i, tt.wantCode)
+ continue
}
- continue
- }
-
- if resp.StatusCode != tt.wantCode {
+ } else if resp.StatusCode != tt.wantCode {
t.Errorf("#%d: resp code=%d, want=%d", i, resp.StatusCode, tt.wantCode)
continue
}
diff --git a/vendor/github.com/coreos/etcd/glide.lock b/vendor/github.com/coreos/etcd/glide.lock
index 1d866cf7c..080d717f0 100644
--- a/vendor/github.com/coreos/etcd/glide.lock
+++ b/vendor/github.com/coreos/etcd/glide.lock
@@ -1,5 +1,5 @@
-hash: cee1f2629857e9c2384ad89ff6014db09498c9af53771e5144ad3a4b510ff00e
-updated: 2017-05-30T10:29:08.22609283-07:00
+hash: e18fa8fb6e4dc1d7eb3cd538c90b0927f26e1ab0b04cbdd209d2d5c3233b7c5b
+updated: 2017-10-05T07:34:38.051011-07:00
imports:
- name: github.com/beorn7/perks
version: 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
@@ -108,7 +108,7 @@ imports:
- name: github.com/xiang90/probing
version: 07dd2e8dfe18522e9c447ba95f2fe95262f63bb2
- name: golang.org/x/crypto
- version: 1351f936d976c60a0a48d728281922cf63eafb8d
+ version: 9419663f5a44be8b34ca85f08abc5fe1be11f8a3
subpackages:
- bcrypt
- blowfish
diff --git a/vendor/github.com/coreos/etcd/glide.yaml b/vendor/github.com/coreos/etcd/glide.yaml
index 90bcfeddc..6dfbbaa97 100644
--- a/vendor/github.com/coreos/etcd/glide.yaml
+++ b/vendor/github.com/coreos/etcd/glide.yaml
@@ -78,7 +78,7 @@ import:
- package: github.com/grpc-ecosystem/go-grpc-prometheus
version: v1.1
- package: golang.org/x/crypto
- version: 1351f936d976c60a0a48d728281922cf63eafb8d
+ version: 9419663f5a44be8b34ca85f08abc5fe1be11f8a3
subpackages:
- bcrypt
- blowfish
diff --git a/vendor/github.com/coreos/etcd/version/version.go b/vendor/github.com/coreos/etcd/version/version.go
index 97a1d2059..6f8c40043 100644
--- a/vendor/github.com/coreos/etcd/version/version.go
+++ b/vendor/github.com/coreos/etcd/version/version.go
@@ -26,7 +26,7 @@ import (
var (
// MinClusterVersion is the min cluster version this etcd binary is compatible with.
MinClusterVersion = "3.0.0"
- Version = "3.2.6"
+ Version = "3.2.9"
APIVersion = "unknown"
// Git SHA Value will be set during build
diff --git a/vendor/github.com/emicklei/go-restful/.travis.yml b/vendor/github.com/emicklei/go-restful/.travis.yml
new file mode 100644
index 000000000..b22f8f547
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/.travis.yml
@@ -0,0 +1,6 @@
+language: go
+
+go:
+ - 1.x
+
+script: go test -v \ No newline at end of file
diff --git a/vendor/github.com/emicklei/go-restful/CHANGES.md b/vendor/github.com/emicklei/go-restful/CHANGES.md
index 45bd20129..d90aaa22e 100644
--- a/vendor/github.com/emicklei/go-restful/CHANGES.md
+++ b/vendor/github.com/emicklei/go-restful/CHANGES.md
@@ -1,27 +1,74 @@
Change history of go-restful
=
+2017-09-13
+- added route condition functions using `.If(func)` in route building.
+
+2017-02-16
+- solved issue #304, make operation names unique
+
+2017-01-30
+
+ [IMPORTANT] For swagger users, change your import statement to:
+ swagger "github.com/emicklei/go-restful-swagger12"
+
+- moved swagger 1.2 code to go-restful-swagger12
+- created TAG 2.0.0
+
+2017-01-27
+
+- remove defer request body close
+- expose Dispatch for testing filters and Routefunctions
+- swagger response model cannot be array
+- created TAG 1.0.0
+
+2016-12-22
+
+- (API change) Remove code related to caching request content. Removes SetCacheReadEntity(doCache bool)
+
+2016-11-26
+
+- Default change! now use CurlyRouter (was RouterJSR311)
+- Default change! no more caching of request content
+- Default change! do not recover from panics
+
+2016-09-22
+
+- fix the DefaultRequestContentType feature
+
+2016-02-14
+
+- take the qualify factor of the Accept header mediatype into account when deciding the contentype of the response
+- add constructors for custom entity accessors for xml and json
+
2015-09-27
+
- rename new WriteStatusAnd... to WriteHeaderAnd... for consistency
2015-09-25
+
- fixed problem with changing Header after WriteHeader (issue 235)
2015-09-14
+
- changed behavior of WriteHeader (immediate write) and WriteEntity (no status write)
- added support for custom EntityReaderWriters.
2015-08-06
+
- add support for reading entities from compressed request content
- use sync.Pool for compressors of http response and request body
- add Description to Parameter for documentation in Swagger UI
2015-03-20
+
- add configurable logging
2015-03-18
+
- if not specified, the Operation is derived from the Route function
2015-03-17
+
- expose Parameter creation functions
- make trace logger an interface
- fix OPTIONSFilter
@@ -30,21 +77,26 @@ Change history of go-restful
- add Notes to Route
2014-11-27
+
- (api add) PrettyPrint per response. (as proposed in #167)
2014-11-12
+
- (api add) ApiVersion(.) for documentation in Swagger UI
2014-11-10
+
- (api change) struct fields tagged with "description" show up in Swagger UI
2014-10-31
+
- (api change) ReturnsError -> Returns
- (api add) RouteBuilder.Do(aBuilder) for DRY use of RouteBuilder
- fix swagger nested structs
- sort Swagger response messages by code
2014-10-23
+
- (api add) ReturnsError allows you to document Http codes in swagger
- fixed problem with greedy CurlyRouter
- (api add) Access-Control-Max-Age in CORS
@@ -58,102 +110,117 @@ Change history of go-restful
- (api add) ParameterNamed for detailed documentation
2014-04-16
+
- (api add) expose constructor of Request for testing.
2014-06-27
+
- (api add) ParameterNamed gives access to a Parameter definition and its data (for further specification).
- (api add) SetCacheReadEntity allow scontrol over whether or not the request body is being cached (default true for compatibility reasons).
2014-07-03
+
- (api add) CORS can be configured with a list of allowed domains
2014-03-12
+
- (api add) Route path parameters can use wildcard or regular expressions. (requires CurlyRouter)
2014-02-26
+
- (api add) Request now provides information about the matched Route, see method SelectedRoutePath
2014-02-17
+
- (api change) renamed parameter constants (go-lint checks)
2014-01-10
- - (api add) support for CloseNotify, see http://golang.org/pkg/net/http/#CloseNotifier
+
+- (api add) support for CloseNotify, see http://golang.org/pkg/net/http/#CloseNotifier
2014-01-07
- - (api change) Write* methods in Response now return the error or nil.
- - added example of serving HTML from a Go template.
- - fixed comparing Allowed headers in CORS (is now case-insensitive)
+
+- (api change) Write* methods in Response now return the error or nil.
+- added example of serving HTML from a Go template.
+- fixed comparing Allowed headers in CORS (is now case-insensitive)
2013-11-13
- - (api add) Response knows how many bytes are written to the response body.
+
+- (api add) Response knows how many bytes are written to the response body.
2013-10-29
- - (api add) RecoverHandler(handler RecoverHandleFunction) to change how panic recovery is handled. Default behavior is to log and return a stacktrace. This may be a security issue as it exposes sourcecode information.
+
+- (api add) RecoverHandler(handler RecoverHandleFunction) to change how panic recovery is handled. Default behavior is to log and return a stacktrace. This may be a security issue as it exposes sourcecode information.
2013-10-04
- - (api add) Response knows what HTTP status has been written
- - (api add) Request can have attributes (map of string->interface, also called request-scoped variables
+
+- (api add) Response knows what HTTP status has been written
+- (api add) Request can have attributes (map of string->interface, also called request-scoped variables
2013-09-12
- - (api change) Router interface simplified
- - Implemented CurlyRouter, a Router that does not use|allow regular expressions in paths
+
+- (api change) Router interface simplified
+- Implemented CurlyRouter, a Router that does not use|allow regular expressions in paths
2013-08-05
- add OPTIONS support
- add CORS support
2013-08-27
- - fixed some reported issues (see github)
- - (api change) deprecated use of WriteError; use WriteErrorString instead
+
+- fixed some reported issues (see github)
+- (api change) deprecated use of WriteError; use WriteErrorString instead
2014-04-15
- - (fix) v1.0.1 tag: fix Issue 111: WriteErrorString
+
+- (fix) v1.0.1 tag: fix Issue 111: WriteErrorString
2013-08-08
- - (api add) Added implementation Container: a WebServices collection with its own http.ServeMux allowing multiple endpoints per program. Existing uses of go-restful will register their services to the DefaultContainer.
- - (api add) the swagger package has be extended to have a UI per container.
- - if panic is detected then a small stack trace is printed (thanks to runner-mei)
- - (api add) WriteErrorString to Response
+
+- (api add) Added implementation Container: a WebServices collection with its own http.ServeMux allowing multiple endpoints per program. Existing uses of go-restful will register their services to the DefaultContainer.
+- (api add) the swagger package has be extended to have a UI per container.
+- if panic is detected then a small stack trace is printed (thanks to runner-mei)
+- (api add) WriteErrorString to Response
Important API changes:
- - (api remove) package variable DoNotRecover no longer works ; use restful.DefaultContainer.DoNotRecover(true) instead.
- - (api remove) package variable EnableContentEncoding no longer works ; use restful.DefaultContainer.EnableContentEncoding(true) instead.
+- (api remove) package variable DoNotRecover no longer works ; use restful.DefaultContainer.DoNotRecover(true) instead.
+- (api remove) package variable EnableContentEncoding no longer works ; use restful.DefaultContainer.EnableContentEncoding(true) instead.
2013-07-06
- - (api add) Added support for response encoding (gzip and deflate(zlib)). This feature is disabled on default (for backwards compatibility). Use restful.EnableContentEncoding = true in your initialization to enable this feature.
+- (api add) Added support for response encoding (gzip and deflate(zlib)). This feature is disabled on default (for backwards compatibility). Use restful.EnableContentEncoding = true in your initialization to enable this feature.
2013-06-19
- - (improve) DoNotRecover option, moved request body closer, improved ReadEntity
+- (improve) DoNotRecover option, moved request body closer, improved ReadEntity
2013-06-03
- - (api change) removed Dispatcher interface, hide PathExpression
- - changed receiver names of type functions to be more idiomatic Go
+- (api change) removed Dispatcher interface, hide PathExpression
+- changed receiver names of type functions to be more idiomatic Go
2013-06-02
- - (optimize) Cache the RegExp compilation of Paths.
+- (optimize) Cache the RegExp compilation of Paths.
2013-05-22
- - (api add) Added support for request/response filter functions
+- (api add) Added support for request/response filter functions
2013-05-18
- - (api add) Added feature to change the default Http Request Dispatch function (travis cline)
- - (api change) Moved Swagger Webservice to swagger package (see example restful-user)
+- (api add) Added feature to change the default Http Request Dispatch function (travis cline)
+- (api change) Moved Swagger Webservice to swagger package (see example restful-user)
[2012-11-14 .. 2013-05-18>
- - See https://github.com/emicklei/go-restful/commits
+- See https://github.com/emicklei/go-restful/commits
2012-11-14
- - Initial commit
+- Initial commit
diff --git a/vendor/github.com/emicklei/go-restful/Makefile b/vendor/github.com/emicklei/go-restful/Makefile
new file mode 100644
index 000000000..b40081cc0
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/Makefile
@@ -0,0 +1,7 @@
+all: test
+
+test:
+ go test -v .
+
+ex:
+ cd examples && ls *.go | xargs go build -o /tmp/ignore \ No newline at end of file
diff --git a/vendor/github.com/emicklei/go-restful/README.md b/vendor/github.com/emicklei/go-restful/README.md
index 8f954c016..002a08d96 100644
--- a/vendor/github.com/emicklei/go-restful/README.md
+++ b/vendor/github.com/emicklei/go-restful/README.md
@@ -1,8 +1,13 @@
go-restful
==========
-
package for building REST-style Web Services using Google Go
+[![Build Status](https://travis-ci.org/emicklei/go-restful.png)](https://travis-ci.org/emicklei/go-restful)
+[![Go Report Card](https://goreportcard.com/badge/github.com/emicklei/go-restful)](https://goreportcard.com/report/github.com/emicklei/go-restful)
+[![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://godoc.org/github.com/emicklei/go-restful)
+
+- [Code examples](https://github.com/emicklei/go-restful/tree/master/examples)
+
REST asks developers to use HTTP methods explicitly and in a way that's consistent with the protocol definition. This basic REST design principle establishes a one-to-one mapping between create, read, update, and delete (CRUD) operations and HTTP methods. According to this mapping:
- GET = Retrieve a representation of a resource
@@ -40,35 +45,31 @@ func (u UserResource) findUser(request *restful.Request, response *restful.Respo
- Routes for request &#8594; function mapping with path parameter (e.g. {id}) support
- Configurable router:
- - Routing algorithm after [JSR311](http://jsr311.java.net/nonav/releases/1.1/spec/spec.html) that is implemented using (but doest **not** accept) regular expressions (See RouterJSR311 which is used by default)
- - Fast routing algorithm that allows static elements, regular expressions and dynamic parameters in the URL path (e.g. /meetings/{id} or /static/{subpath:*}, See CurlyRouter)
+ - (default) Fast routing algorithm that allows static elements, regular expressions and dynamic parameters in the URL path (e.g. /meetings/{id} or /static/{subpath:*}
+ - Routing algorithm after [JSR311](http://jsr311.java.net/nonav/releases/1.1/spec/spec.html) that is implemented using (but does **not** accept) regular expressions
- Request API for reading structs from JSON/XML and accesing parameters (path,query,header)
- Response API for writing structs to JSON/XML and setting headers
+- Customizable encoding using EntityReaderWriter registration
- Filters for intercepting the request &#8594; response flow on Service or Route level
- Request-scoped variables using attributes
- Containers for WebServices on different HTTP endpoints
- Content encoding (gzip,deflate) of request and response payloads
- Automatic responses on OPTIONS (using a filter)
- Automatic CORS request handling (using a filter)
-- API declaration for Swagger UI (see swagger package)
+- API declaration for Swagger UI ([go-restful-openapi](https://github.com/emicklei/go-restful-openapi), see [go-restful-swagger12](https://github.com/emicklei/go-restful-swagger12))
- Panic recovery to produce HTTP 500, customizable using RecoverHandler(...)
- Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...)
- Configurable (trace) logging
-- Customizable encoding using EntityReaderWriter registration
- Customizable gzip/deflate readers and writers using CompressorProvider registration
### Resources
-- [Documentation on godoc.org](http://godoc.org/github.com/emicklei/go-restful)
-- [Code examples](https://github.com/emicklei/go-restful/tree/master/examples)
-- [Example posted on blog](http://ernestmicklei.com/2012/11/24/go-restful-first-working-example/)
-- [Design explained on blog](http://ernestmicklei.com/2012/11/11/go-restful-api-design/)
+- [Example posted on blog](http://ernestmicklei.com/2012/11/go-restful-first-working-example/)
+- [Design explained on blog](http://ernestmicklei.com/2012/11/go-restful-api-design/)
- [sourcegraph](https://sourcegraph.com/github.com/emicklei/go-restful)
-- [gopkg.in](https://gopkg.in/emicklei/go-restful.v1)
+- [showcase: Zazkia - tcp proxy for testing resiliency](https://github.com/emicklei/zazkia)
- [showcase: Mora - MongoDB REST Api server](https://github.com/emicklei/mora)
-[![Build Status](https://drone.io/github.com/emicklei/go-restful/status.png)](https://drone.io/github.com/emicklei/go-restful/latest)
-
-(c) 2012 - 2015, http://ernestmicklei.com. MIT License
+Type ```git shortlog -s``` for a full list of contributors.
-Type ```git shortlog -s``` for a full list of contributors. \ No newline at end of file
+© 2012 - 2017, http://ernestmicklei.com. MIT License. Contributions are welcome.
diff --git a/vendor/github.com/emicklei/go-restful/compress.go b/vendor/github.com/emicklei/go-restful/compress.go
index 66f3603e4..220b37712 100644
--- a/vendor/github.com/emicklei/go-restful/compress.go
+++ b/vendor/github.com/emicklei/go-restful/compress.go
@@ -5,10 +5,12 @@ package restful
// that can be found in the LICENSE file.
import (
+ "bufio"
"compress/gzip"
"compress/zlib"
"errors"
"io"
+ "net"
"net/http"
"strings"
)
@@ -69,6 +71,17 @@ func (c *CompressingResponseWriter) isCompressorClosed() bool {
return nil == c.compressor
}
+// Hijack implements the Hijacker interface
+// This is especially useful when combining Container.EnabledContentEncoding
+// in combination with websockets (for instance gorilla/websocket)
+func (c *CompressingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ hijacker, ok := c.writer.(http.Hijacker)
+ if !ok {
+ return nil, nil, errors.New("ResponseWriter doesn't support Hijacker interface")
+ }
+ return hijacker.Hijack()
+}
+
// WantsCompressedResponse reads the Accept-Encoding header to see if and which encoding is requested.
func wantsCompressedResponse(httpRequest *http.Request) (bool, string) {
header := httpRequest.Header.Get(HEADER_AcceptEncoding)
diff --git a/vendor/github.com/emicklei/go-restful/compress_test.go b/vendor/github.com/emicklei/go-restful/compress_test.go
index 84a93c3fc..cc3e93d54 100644
--- a/vendor/github.com/emicklei/go-restful/compress_test.go
+++ b/vendor/github.com/emicklei/go-restful/compress_test.go
@@ -94,7 +94,6 @@ func TestGzipDecompressRequestBody(t *testing.T) {
httpRequest.Header.Set("Content-Encoding", "gzip")
req.Request = httpRequest
- doCacheReadEntityBytes = false
doc := make(map[string]interface{})
req.ReadEntity(&doc)
@@ -117,7 +116,6 @@ func TestZlibDecompressRequestBody(t *testing.T) {
httpRequest.Header.Set("Content-Encoding", "deflate")
req.Request = httpRequest
- doCacheReadEntityBytes = false
doc := make(map[string]interface{})
req.ReadEntity(&doc)
diff --git a/vendor/github.com/emicklei/go-restful/compressors.go b/vendor/github.com/emicklei/go-restful/compressors.go
index f028456e0..9db4a8c8e 100644
--- a/vendor/github.com/emicklei/go-restful/compressors.go
+++ b/vendor/github.com/emicklei/go-restful/compressors.go
@@ -9,25 +9,26 @@ import (
"compress/zlib"
)
+// CompressorProvider describes a component that can provide compressors for the std methods.
type CompressorProvider interface {
// Returns a *gzip.Writer which needs to be released later.
// Before using it, call Reset().
AcquireGzipWriter() *gzip.Writer
- // Releases an aqcuired *gzip.Writer.
+ // Releases an acquired *gzip.Writer.
ReleaseGzipWriter(w *gzip.Writer)
// Returns a *gzip.Reader which needs to be released later.
AcquireGzipReader() *gzip.Reader
- // Releases an aqcuired *gzip.Reader.
+ // Releases an acquired *gzip.Reader.
ReleaseGzipReader(w *gzip.Reader)
// Returns a *zlib.Writer which needs to be released later.
// Before using it, call Reset().
AcquireZlibWriter() *zlib.Writer
- // Releases an aqcuired *zlib.Writer.
+ // Releases an acquired *zlib.Writer.
ReleaseZlibWriter(w *zlib.Writer)
}
@@ -44,7 +45,7 @@ func CurrentCompressorProvider() CompressorProvider {
return currentCompressorProvider
}
-// CompressorProvider sets the actual provider of compressors (zlib or gzip).
+// SetCompressorProvider sets the actual provider of compressors (zlib or gzip).
func SetCompressorProvider(p CompressorProvider) {
if p == nil {
panic("cannot set compressor provider to nil")
diff --git a/vendor/github.com/emicklei/go-restful/container.go b/vendor/github.com/emicklei/go-restful/container.go
index 59f34abea..4196180e5 100644
--- a/vendor/github.com/emicklei/go-restful/container.go
+++ b/vendor/github.com/emicklei/go-restful/container.go
@@ -6,6 +6,7 @@ package restful
import (
"bytes"
+ "errors"
"fmt"
"net/http"
"os"
@@ -24,24 +25,24 @@ type Container struct {
ServeMux *http.ServeMux
isRegisteredOnRoot bool
containerFilters []FilterFunction
- doNotRecover bool // default is false
+ doNotRecover bool // default is true
recoverHandleFunc RecoverHandleFunction
serviceErrorHandleFunc ServiceErrorHandleFunction
- router RouteSelector // default is a RouterJSR311, CurlyRouter is the faster alternative
+ router RouteSelector // default is a CurlyRouter (RouterJSR311 is a slower alternative)
contentEncodingEnabled bool // default is false
}
-// NewContainer creates a new Container using a new ServeMux and default router (RouterJSR311)
+// NewContainer creates a new Container using a new ServeMux and default router (CurlyRouter)
func NewContainer() *Container {
return &Container{
webServices: []*WebService{},
ServeMux: http.NewServeMux(),
isRegisteredOnRoot: false,
containerFilters: []FilterFunction{},
- doNotRecover: false,
+ doNotRecover: true,
recoverHandleFunc: logStackOnRecover,
serviceErrorHandleFunc: writeServiceError,
- router: RouterJSR311{},
+ router: CurlyRouter{},
contentEncodingEnabled: false}
}
@@ -68,12 +69,12 @@ func (c *Container) ServiceErrorHandler(handler ServiceErrorHandleFunction) {
// DoNotRecover controls whether panics will be caught to return HTTP 500.
// If set to true, Route functions are responsible for handling any error situation.
-// Default value is false = recover from panics. This has performance implications.
+// Default value is true.
func (c *Container) DoNotRecover(doNot bool) {
c.doNotRecover = doNot
}
-// Router changes the default Router (currently RouterJSR311)
+// Router changes the default Router (currently CurlyRouter)
func (c *Container) Router(aRouter RouteSelector) {
c.router = aRouter
}
@@ -83,34 +84,16 @@ func (c *Container) EnableContentEncoding(enabled bool) {
c.contentEncodingEnabled = enabled
}
-// Add a WebService to the Container. It will detect duplicate root paths and panic in that case.
+// Add a WebService to the Container. It will detect duplicate root paths and exit in that case.
func (c *Container) Add(service *WebService) *Container {
c.webServicesLock.Lock()
defer c.webServicesLock.Unlock()
- // If registered on root then no additional specific mapping is needed
- if !c.isRegisteredOnRoot {
- pattern := c.fixedPrefixPath(service.RootPath())
- // check if root path registration is needed
- if "/" == pattern || "" == pattern {
- c.ServeMux.HandleFunc("/", c.dispatch)
- c.isRegisteredOnRoot = true
- } else {
- // detect if registration already exists
- alreadyMapped := false
- for _, each := range c.webServices {
- if each.RootPath() == service.RootPath() {
- alreadyMapped = true
- break
- }
- }
- if !alreadyMapped {
- c.ServeMux.HandleFunc(pattern, c.dispatch)
- if !strings.HasSuffix(pattern, "/") {
- c.ServeMux.HandleFunc(pattern+"/", c.dispatch)
- }
- }
- }
+
+ // if rootPath was not set then lazy initialize it
+ if len(service.rootPath) == 0 {
+ service.Path("/")
}
+
// cannot have duplicate root paths
for _, each := range c.webServices {
if each.RootPath() == service.RootPath() {
@@ -118,24 +101,64 @@ func (c *Container) Add(service *WebService) *Container {
os.Exit(1)
}
}
- // if rootPath was not set then lazy initialize it
- if len(service.rootPath) == 0 {
- service.Path("/")
+
+ // If not registered on root then add specific mapping
+ if !c.isRegisteredOnRoot {
+ c.isRegisteredOnRoot = c.addHandler(service, c.ServeMux)
}
c.webServices = append(c.webServices, service)
return c
}
+// addHandler may set a new HandleFunc for the serveMux
+// this function must run inside the critical region protected by the webServicesLock.
+// returns true if the function was registered on root ("/")
+func (c *Container) addHandler(service *WebService, serveMux *http.ServeMux) bool {
+ pattern := fixedPrefixPath(service.RootPath())
+ // check if root path registration is needed
+ if "/" == pattern || "" == pattern {
+ serveMux.HandleFunc("/", c.dispatch)
+ return true
+ }
+ // detect if registration already exists
+ alreadyMapped := false
+ for _, each := range c.webServices {
+ if each.RootPath() == service.RootPath() {
+ alreadyMapped = true
+ break
+ }
+ }
+ if !alreadyMapped {
+ serveMux.HandleFunc(pattern, c.dispatch)
+ if !strings.HasSuffix(pattern, "/") {
+ serveMux.HandleFunc(pattern+"/", c.dispatch)
+ }
+ }
+ return false
+}
+
func (c *Container) Remove(ws *WebService) error {
+ if c.ServeMux == http.DefaultServeMux {
+ errMsg := fmt.Sprintf("[restful] cannot remove a WebService from a Container using the DefaultServeMux: ['%v']", ws)
+ log.Print(errMsg)
+ return errors.New(errMsg)
+ }
c.webServicesLock.Lock()
defer c.webServicesLock.Unlock()
+ // build a new ServeMux and re-register all WebServices
+ newServeMux := http.NewServeMux()
newServices := []*WebService{}
- for ix := range c.webServices {
- if c.webServices[ix].rootPath != ws.rootPath {
- newServices = append(newServices, c.webServices[ix])
+ newIsRegisteredOnRoot := false
+ for _, each := range c.webServices {
+ if each.rootPath != ws.rootPath {
+ // If not registered on root then add specific mapping
+ if !newIsRegisteredOnRoot {
+ newIsRegisteredOnRoot = c.addHandler(each, newServeMux)
+ }
+ newServices = append(newServices, each)
}
}
- c.webServices = newServices
+ c.webServices, c.ServeMux, c.isRegisteredOnRoot = newServices, newServeMux, newIsRegisteredOnRoot
return nil
}
@@ -166,6 +189,17 @@ func writeServiceError(err ServiceError, req *Request, resp *Response) {
}
// Dispatch the incoming Http Request to a matching WebService.
+func (c *Container) Dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) {
+ if httpWriter == nil {
+ panic("httpWriter cannot be nil")
+ }
+ if httpRequest == nil {
+ panic("httpRequest cannot be nil")
+ }
+ c.dispatch(httpWriter, httpRequest)
+}
+
+// Dispatch the incoming Http Request to a matching WebService.
func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) {
writer := httpWriter
@@ -185,12 +219,6 @@ func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.R
}
}()
}
- // Install closing the request body (if any)
- defer func() {
- if nil != httpRequest.Body {
- httpRequest.Body.Close()
- }
- }()
// Detect if compression is needed
// assume without compression, test for override
@@ -251,7 +279,7 @@ func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.R
}
// fixedPrefixPath returns the fixed part of the partspec ; it may include template vars {}
-func (c Container) fixedPrefixPath(pathspec string) string {
+func fixedPrefixPath(pathspec string) string {
varBegin := strings.Index(pathspec, "{")
if -1 == varBegin {
return pathspec
@@ -260,12 +288,12 @@ func (c Container) fixedPrefixPath(pathspec string) string {
}
// ServeHTTP implements net/http.Handler therefore a Container can be a Handler in a http.Server
-func (c Container) ServeHTTP(httpwriter http.ResponseWriter, httpRequest *http.Request) {
+func (c *Container) ServeHTTP(httpwriter http.ResponseWriter, httpRequest *http.Request) {
c.ServeMux.ServeHTTP(httpwriter, httpRequest)
}
// Handle registers the handler for the given pattern. If a handler already exists for pattern, Handle panics.
-func (c Container) Handle(pattern string, handler http.Handler) {
+func (c *Container) Handle(pattern string, handler http.Handler) {
c.ServeMux.Handle(pattern, handler)
}
@@ -295,7 +323,7 @@ func (c *Container) Filter(filter FilterFunction) {
}
// RegisteredWebServices returns the collections of added WebServices
-func (c Container) RegisteredWebServices() []*WebService {
+func (c *Container) RegisteredWebServices() []*WebService {
c.webServicesLock.RLock()
defer c.webServicesLock.RUnlock()
result := make([]*WebService, len(c.webServices))
@@ -306,7 +334,7 @@ func (c Container) RegisteredWebServices() []*WebService {
}
// computeAllowedMethods returns a list of HTTP methods that are valid for a Request
-func (c Container) computeAllowedMethods(req *Request) []string {
+func (c *Container) computeAllowedMethods(req *Request) []string {
// Go through all RegisteredWebServices() and all its Routes to collect the options
methods := []string{}
requestPath := req.Request.URL.Path
diff --git a/vendor/github.com/emicklei/go-restful/container_test.go b/vendor/github.com/emicklei/go-restful/container_test.go
index dd2552c37..491c793ab 100644
--- a/vendor/github.com/emicklei/go-restful/container_test.go
+++ b/vendor/github.com/emicklei/go-restful/container_test.go
@@ -59,3 +59,25 @@ func TestContainer_HandleWithFilter(t *testing.T) {
t.Errorf("handler added by calling HandleWithFilter wasn't called")
}
}
+
+func TestContainerAddAndRemove(t *testing.T) {
+ ws1 := new(WebService).Path("/")
+ ws2 := new(WebService).Path("/users")
+ wc := NewContainer()
+ wc.Add(ws1)
+ wc.Add(ws2)
+ wc.Remove(ws2)
+ if len(wc.webServices) != 1 {
+ t.Errorf("expected one webservices")
+ }
+ if !wc.isRegisteredOnRoot {
+ t.Errorf("expected on root registered")
+ }
+ wc.Remove(ws1)
+ if len(wc.webServices) > 0 {
+ t.Errorf("expected zero webservices")
+ }
+ if wc.isRegisteredOnRoot {
+ t.Errorf("expected not on root registered")
+ }
+}
diff --git a/vendor/github.com/emicklei/go-restful/cors_filter.go b/vendor/github.com/emicklei/go-restful/cors_filter.go
index cd9e7fd29..1efeef072 100644
--- a/vendor/github.com/emicklei/go-restful/cors_filter.go
+++ b/vendor/github.com/emicklei/go-restful/cors_filter.go
@@ -5,6 +5,7 @@ package restful
// that can be found in the LICENSE file.
import (
+ "regexp"
"strconv"
"strings"
)
@@ -19,11 +20,13 @@ import (
type CrossOriginResourceSharing struct {
ExposeHeaders []string // list of Header names
AllowedHeaders []string // list of Header names
- AllowedDomains []string // list of allowed values for Http Origin. If empty all are allowed.
+ AllowedDomains []string // list of allowed values for Http Origin. An allowed value can be a regular expression to support subdomain matching. If empty all are allowed.
AllowedMethods []string
MaxAge int // number of seconds before requiring new Options request
CookiesAllowed bool
Container *Container
+
+ allowedOriginPatterns []*regexp.Regexp // internal field for origin regexp check.
}
// Filter is a filter function that implements the CORS flow as documented on http://enable-cors.org/server.html
@@ -37,21 +40,12 @@ func (c CrossOriginResourceSharing) Filter(req *Request, resp *Response, chain *
chain.ProcessFilter(req, resp)
return
}
- if len(c.AllowedDomains) > 0 { // if provided then origin must be included
- included := false
- for _, each := range c.AllowedDomains {
- if each == origin {
- included = true
- break
- }
- }
- if !included {
- if trace {
- traceLogger.Printf("HTTP Origin:%s is not part of %v", origin, c.AllowedDomains)
- }
- chain.ProcessFilter(req, resp)
- return
+ if !c.isOriginAllowed(origin) { // check whether this origin is allowed
+ if trace {
+ traceLogger.Printf("HTTP Origin:%s is not part of %v, neither matches any part of %v", origin, c.AllowedDomains, c.allowedOriginPatterns)
}
+ chain.ProcessFilter(req, resp)
+ return
}
if req.Request.Method != "OPTIONS" {
c.doActualRequest(req, resp)
@@ -74,7 +68,11 @@ func (c CrossOriginResourceSharing) doActualRequest(req *Request, resp *Response
func (c *CrossOriginResourceSharing) doPreflightRequest(req *Request, resp *Response) {
if len(c.AllowedMethods) == 0 {
- c.AllowedMethods = c.Container.computeAllowedMethods(req)
+ if c.Container == nil {
+ c.AllowedMethods = DefaultContainer.computeAllowedMethods(req)
+ } else {
+ c.AllowedMethods = c.Container.computeAllowedMethods(req)
+ }
}
acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod)
@@ -124,13 +122,32 @@ func (c CrossOriginResourceSharing) isOriginAllowed(origin string) bool {
if len(c.AllowedDomains) == 0 {
return true
}
+
allowed := false
- for _, each := range c.AllowedDomains {
- if each == origin {
+ for _, domain := range c.AllowedDomains {
+ if domain == origin {
allowed = true
break
}
}
+
+ if !allowed {
+ if len(c.allowedOriginPatterns) == 0 {
+ // compile allowed domains to allowed origin patterns
+ allowedOriginRegexps, err := compileRegexps(c.AllowedDomains)
+ if err != nil {
+ return false
+ }
+ c.allowedOriginPatterns = allowedOriginRegexps
+ }
+
+ for _, pattern := range c.allowedOriginPatterns {
+ if allowed = pattern.MatchString(origin); allowed {
+ break
+ }
+ }
+ }
+
return allowed
}
@@ -170,3 +187,16 @@ func (c CrossOriginResourceSharing) isValidAccessControlRequestHeader(header str
}
return false
}
+
+// Take a list of strings and compile them into a list of regular expressions.
+func compileRegexps(regexpStrings []string) ([]*regexp.Regexp, error) {
+ regexps := []*regexp.Regexp{}
+ for _, regexpStr := range regexpStrings {
+ r, err := regexp.Compile(regexpStr)
+ if err != nil {
+ return regexps, err
+ }
+ regexps = append(regexps, r)
+ }
+ return regexps, nil
+}
diff --git a/vendor/github.com/emicklei/go-restful/cors_filter_test.go b/vendor/github.com/emicklei/go-restful/cors_filter_test.go
index 9b4723089..09c5d3300 100644
--- a/vendor/github.com/emicklei/go-restful/cors_filter_test.go
+++ b/vendor/github.com/emicklei/go-restful/cors_filter_test.go
@@ -29,7 +29,7 @@ func TestCORSFilter_Preflight(t *testing.T) {
httpRequest.Header.Set(HEADER_AccessControlRequestHeaders, "X-Custom-Header, X-Additional-Header")
httpWriter := httptest.NewRecorder()
- DefaultContainer.dispatch(httpWriter, httpRequest)
+ DefaultContainer.Dispatch(httpWriter, httpRequest)
actual := httpWriter.Header().Get(HEADER_AccessControlAllowOrigin)
if "http://api.bob.com" != actual {
@@ -78,7 +78,7 @@ func TestCORSFilter_Actual(t *testing.T) {
httpRequest.Header.Set("X-Custom-Header", "value")
httpWriter := httptest.NewRecorder()
- DefaultContainer.dispatch(httpWriter, httpRequest)
+ DefaultContainer.Dispatch(httpWriter, httpRequest)
actual := httpWriter.Header().Get(HEADER_AccessControlAllowOrigin)
if "http://api.bob.com" != actual {
t.Fatal("expected: http://api.bob.com but got:" + actual)
@@ -89,11 +89,15 @@ func TestCORSFilter_Actual(t *testing.T) {
}
var allowedDomainInput = []struct {
- domains []string
- origin string
- accepted bool
+ domains []string
+ origin string
+ allowed bool
}{
{[]string{}, "http://anything.com", true},
+ {[]string{"example.com"}, "example.com", true},
+ {[]string{"example.com"}, "not-allowed", false},
+ {[]string{"not-matching.com", "example.com"}, "example.com", true},
+ {[]string{".*"}, "example.com", true},
}
// go test -v -test.run TestCORSFilter_AllowedDomains ...restful
@@ -113,12 +117,12 @@ func TestCORSFilter_AllowedDomains(t *testing.T) {
httpRequest, _ := http.NewRequest("PUT", "http://api.his.com/cors", nil)
httpRequest.Header.Set(HEADER_Origin, each.origin)
httpWriter := httptest.NewRecorder()
- DefaultContainer.dispatch(httpWriter, httpRequest)
+ DefaultContainer.Dispatch(httpWriter, httpRequest)
actual := httpWriter.Header().Get(HEADER_AccessControlAllowOrigin)
- if actual != each.origin && each.accepted {
+ if actual != each.origin && each.allowed {
t.Fatal("expected to be accepted")
}
- if actual == each.origin && !each.accepted {
+ if actual == each.origin && !each.allowed {
t.Fatal("did not expect to be accepted")
}
}
diff --git a/vendor/github.com/emicklei/go-restful/curly.go b/vendor/github.com/emicklei/go-restful/curly.go
index ce284f747..79f1f5aa2 100644
--- a/vendor/github.com/emicklei/go-restful/curly.go
+++ b/vendor/github.com/emicklei/go-restful/curly.go
@@ -44,16 +44,16 @@ func (c CurlyRouter) SelectRoute(
}
// selectRoutes return a collection of Route from a WebService that matches the path tokens from the request.
-func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) []Route {
- candidates := &sortableCurlyRoutes{[]*curlyRoute{}}
+func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortableCurlyRoutes {
+ candidates := sortableCurlyRoutes{}
for _, each := range ws.routes {
matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens)
if matches {
- candidates.add(&curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers?
+ candidates.add(curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers?
}
}
sort.Sort(sort.Reverse(candidates))
- return candidates.routes()
+ return candidates
}
// matchesRouteByPathTokens computes whether it matches, howmany parameters do match and what the number of static path elements are.
@@ -108,11 +108,13 @@ func (c CurlyRouter) regularMatchesPathToken(routeToken string, colon int, reque
return (matched && err == nil), false
}
+var jsr311Router = RouterJSR311{}
+
// detectRoute selectes from a list of Route the first match by inspecting both the Accept and Content-Type
// headers of the Request. See also RouterJSR311 in jsr311.go
-func (c CurlyRouter) detectRoute(candidateRoutes []Route, httpRequest *http.Request) (*Route, error) {
+func (c CurlyRouter) detectRoute(candidateRoutes sortableCurlyRoutes, httpRequest *http.Request) (*Route, error) {
// tracing is done inside detectRoute
- return RouterJSR311{}.detectRoute(candidateRoutes, httpRequest)
+ return jsr311Router.detectRoute(candidateRoutes.routes(), httpRequest)
}
// detectWebService returns the best matching webService given the list of path tokens.
diff --git a/vendor/github.com/emicklei/go-restful/curly_route.go b/vendor/github.com/emicklei/go-restful/curly_route.go
index 3edab72fd..296f94650 100644
--- a/vendor/github.com/emicklei/go-restful/curly_route.go
+++ b/vendor/github.com/emicklei/go-restful/curly_route.go
@@ -11,30 +11,28 @@ type curlyRoute struct {
staticCount int
}
-type sortableCurlyRoutes struct {
- candidates []*curlyRoute
-}
+type sortableCurlyRoutes []curlyRoute
-func (s *sortableCurlyRoutes) add(route *curlyRoute) {
- s.candidates = append(s.candidates, route)
+func (s *sortableCurlyRoutes) add(route curlyRoute) {
+ *s = append(*s, route)
}
-func (s *sortableCurlyRoutes) routes() (routes []Route) {
- for _, each := range s.candidates {
+func (s sortableCurlyRoutes) routes() (routes []Route) {
+ for _, each := range s {
routes = append(routes, each.route) // TODO change return type
}
return routes
}
-func (s *sortableCurlyRoutes) Len() int {
- return len(s.candidates)
+func (s sortableCurlyRoutes) Len() int {
+ return len(s)
}
-func (s *sortableCurlyRoutes) Swap(i, j int) {
- s.candidates[i], s.candidates[j] = s.candidates[j], s.candidates[i]
+func (s sortableCurlyRoutes) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
}
-func (s *sortableCurlyRoutes) Less(i, j int) bool {
- ci := s.candidates[i]
- cj := s.candidates[j]
+func (s sortableCurlyRoutes) Less(i, j int) bool {
+ ci := s[i]
+ cj := s[j]
// primary key
if ci.staticCount < cj.staticCount {
diff --git a/vendor/github.com/emicklei/go-restful/curly_test.go b/vendor/github.com/emicklei/go-restful/curly_test.go
index 31d66dcbd..bec017ca7 100644
--- a/vendor/github.com/emicklei/go-restful/curly_test.go
+++ b/vendor/github.com/emicklei/go-restful/curly_test.go
@@ -163,12 +163,12 @@ func TestCurly_ISSUE_34(t *testing.T) {
ws1 := new(WebService).Path("/")
ws1.Route(ws1.GET("/{type}/{id}").To(curlyDummy))
ws1.Route(ws1.GET("/network/{id}").To(curlyDummy))
- routes := CurlyRouter{}.selectRoutes(ws1, tokenizePath("/network/12"))
- if len(routes) != 2 {
+ croutes := CurlyRouter{}.selectRoutes(ws1, tokenizePath("/network/12"))
+ if len(croutes) != 2 {
t.Fatal("expected 2 routes")
}
- if routes[0].Path != "/network/{id}" {
- t.Error("first is", routes[0].Path)
+ if got, want := croutes[0].route.Path, "/network/{id}"; got != want {
+ t.Errorf("got %v want %v", got, want)
}
}
@@ -177,12 +177,12 @@ func TestCurly_ISSUE_34_2(t *testing.T) {
ws1 := new(WebService)
ws1.Route(ws1.GET("/network/{id}").To(curlyDummy))
ws1.Route(ws1.GET("/{type}/{id}").To(curlyDummy))
- routes := CurlyRouter{}.selectRoutes(ws1, tokenizePath("/network/12"))
- if len(routes) != 2 {
+ croutes := CurlyRouter{}.selectRoutes(ws1, tokenizePath("/network/12"))
+ if len(croutes) != 2 {
t.Fatal("expected 2 routes")
}
- if routes[0].Path != "/network/{id}" {
- t.Error("first is", routes[0].Path)
+ if got, want := croutes[0].route.Path, "/network/{id}"; got != want {
+ t.Errorf("got %v want %v", got, want)
}
}
diff --git a/vendor/github.com/emicklei/go-restful/doc.go b/vendor/github.com/emicklei/go-restful/doc.go
index d40405bf7..f7c16b01f 100644
--- a/vendor/github.com/emicklei/go-restful/doc.go
+++ b/vendor/github.com/emicklei/go-restful/doc.go
@@ -1,5 +1,5 @@
/*
-Package restful, a lean package for creating REST-style WebServices without magic.
+Package restful , a lean package for creating REST-style WebServices without magic.
WebServices and Routes
@@ -145,22 +145,11 @@ Performance options
This package has several options that affect the performance of your service. It is important to understand them and how you can change it.
- restful.DefaultContainer.Router(CurlyRouter{})
-
-The default router is the RouterJSR311 which is an implementation of its spec (http://jsr311.java.net/nonav/releases/1.1/spec/spec.html).
-However, it uses regular expressions for all its routes which, depending on your usecase, may consume a significant amount of time.
-The CurlyRouter implementation is more lightweight that also allows you to use wildcards and expressions, but only if needed.
-
- restful.DefaultContainer.DoNotRecover(true)
+ restful.DefaultContainer.DoNotRecover(false)
DoNotRecover controls whether panics will be caught to return HTTP 500.
-If set to true, Route functions are responsible for handling any error situation.
-Default value is false; it will recover from panics. This has performance implications.
-
- restful.SetCacheReadEntity(false)
-
-SetCacheReadEntity controls whether the response data ([]byte) is cached such that ReadEntity is repeatable.
-If you expect to read large amounts of payload data, and you do not use this feature, you should set it to false.
+If set to false, the container will recover from panics.
+Default value is true
restful.SetCompressorProvider(NewBoundedCachedCompressors(20, 20))
diff --git a/vendor/github.com/emicklei/go-restful/entity_accessors.go b/vendor/github.com/emicklei/go-restful/entity_accessors.go
index e3ab79d9b..6ecf6c7f8 100644
--- a/vendor/github.com/emicklei/go-restful/entity_accessors.go
+++ b/vendor/github.com/emicklei/go-restful/entity_accessors.go
@@ -36,8 +36,8 @@ type entityReaderWriters struct {
}
func init() {
- RegisterEntityAccessor(MIME_JSON, entityJSONAccess{ContentType: MIME_JSON})
- RegisterEntityAccessor(MIME_XML, entityXMLAccess{ContentType: MIME_XML})
+ RegisterEntityAccessor(MIME_JSON, NewEntityAccessorJSON(MIME_JSON))
+ RegisterEntityAccessor(MIME_XML, NewEntityAccessorXML(MIME_XML))
}
// RegisterEntityAccessor add/overrides the ReaderWriter for encoding content with this MIME type.
@@ -47,8 +47,20 @@ func RegisterEntityAccessor(mime string, erw EntityReaderWriter) {
entityAccessRegistry.accessors[mime] = erw
}
-// AccessorAt returns the registered ReaderWriter for this MIME type.
-func (r *entityReaderWriters) AccessorAt(mime string) (EntityReaderWriter, bool) {
+// NewEntityAccessorJSON returns a new EntityReaderWriter for accessing JSON content.
+// This package is already initialized with such an accessor using the MIME_JSON contentType.
+func NewEntityAccessorJSON(contentType string) EntityReaderWriter {
+ return entityJSONAccess{ContentType: contentType}
+}
+
+// NewEntityAccessorXML returns a new EntityReaderWriter for accessing XML content.
+// This package is already initialized with such an accessor using the MIME_XML contentType.
+func NewEntityAccessorXML(contentType string) EntityReaderWriter {
+ return entityXMLAccess{ContentType: contentType}
+}
+
+// accessorAt returns the registered ReaderWriter for this MIME type.
+func (r *entityReaderWriters) accessorAt(mime string) (EntityReaderWriter, bool) {
r.protection.RLock()
defer r.protection.RUnlock()
er, ok := r.accessors[mime]
diff --git a/vendor/github.com/emicklei/go-restful/entity_accessors_test.go b/vendor/github.com/emicklei/go-restful/entity_accessors_test.go
index 943093ae0..d1c1e1585 100644
--- a/vendor/github.com/emicklei/go-restful/entity_accessors_test.go
+++ b/vendor/github.com/emicklei/go-restful/entity_accessors_test.go
@@ -49,7 +49,7 @@ func TestKeyValueEncoding(t *testing.T) {
// Write
httpWriter := httptest.NewRecorder()
// Accept Produces
- resp := Response{httpWriter, "application/kv,*/*;q=0.8", []string{"application/kv"}, 0, 0, true, nil}
+ resp := Response{ResponseWriter: httpWriter, requestAccept: "application/kv,*/*;q=0.8", routeProduces: []string{"application/kv"}, prettyPrint: true}
resp.WriteEntity(b)
t.Log(string(httpWriter.Body.Bytes()))
if !kv.writeCalled {
diff --git a/vendor/github.com/emicklei/go-restful/filter.go b/vendor/github.com/emicklei/go-restful/filter.go
index 4b86656e1..c23bfb591 100644
--- a/vendor/github.com/emicklei/go-restful/filter.go
+++ b/vendor/github.com/emicklei/go-restful/filter.go
@@ -24,3 +24,12 @@ func (f *FilterChain) ProcessFilter(request *Request, response *Response) {
// FilterFunction definitions must call ProcessFilter on the FilterChain to pass on the control and eventually call the RouteFunction
type FilterFunction func(*Request, *Response, *FilterChain)
+
+// NoBrowserCacheFilter is a filter function to set HTTP headers that disable browser caching
+// See examples/restful-no-cache-filter.go for usage
+func NoBrowserCacheFilter(req *Request, resp *Response, chain *FilterChain) {
+ resp.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate") // HTTP 1.1.
+ resp.Header().Set("Pragma", "no-cache") // HTTP 1.0.
+ resp.Header().Set("Expires", "0") // Proxies.
+ chain.ProcessFilter(req, resp)
+}
diff --git a/vendor/github.com/emicklei/go-restful/install.sh b/vendor/github.com/emicklei/go-restful/install.sh
deleted file mode 100644
index 5fe03b569..000000000
--- a/vendor/github.com/emicklei/go-restful/install.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-cd examples
- ls *.go | xargs -I {} go build -o /tmp/ignore {}
- cd ..
-go fmt ...swagger && \
-go test -test.v ...swagger && \
-go install ...swagger && \
-go fmt ...restful && \
-go test -test.v ...restful && \
-go install ...restful \ No newline at end of file
diff --git a/vendor/github.com/emicklei/go-restful/jsr311.go b/vendor/github.com/emicklei/go-restful/jsr311.go
index b4fa9bbae..9e8122416 100644
--- a/vendor/github.com/emicklei/go-restful/jsr311.go
+++ b/vendor/github.com/emicklei/go-restful/jsr311.go
@@ -41,9 +41,29 @@ func (r RouterJSR311) SelectRoute(
// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2
func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) {
+ ifOk := []Route{}
+ for _, each := range routes {
+ ok := true
+ for _, fn := range each.If {
+ if !fn(httpRequest) {
+ ok = false
+ break
+ }
+ }
+ if ok {
+ ifOk = append(ifOk, each)
+ }
+ }
+ if len(ifOk) == 0 {
+ if trace {
+ traceLogger.Printf("no Route found (from %d) that passes conditional checks", len(routes))
+ }
+ return nil, NewError(http.StatusNotFound, "404: Not Found")
+ }
+
// http method
methodOk := []Route{}
- for _, each := range routes {
+ for _, each := range ifOk {
if httpRequest.Method == each.Method {
methodOk = append(methodOk, each)
}
@@ -74,7 +94,7 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R
// accept
outputMediaOk := []Route{}
accept := httpRequest.Header.Get(HEADER_Accept)
- if accept == "" {
+ if len(accept) == 0 {
accept = "*/*"
}
for _, each := range inputMediaOk {
@@ -88,7 +108,8 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R
}
return nil, NewError(http.StatusNotAcceptable, "406: Not Acceptable")
}
- return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil
+ // return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil
+ return &outputMediaOk[0], nil
}
// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2
diff --git a/vendor/github.com/emicklei/go-restful/jsr311_test.go b/vendor/github.com/emicklei/go-restful/jsr311_test.go
index 3e79a6def..ecde60366 100644
--- a/vendor/github.com/emicklei/go-restful/jsr311_test.go
+++ b/vendor/github.com/emicklei/go-restful/jsr311_test.go
@@ -2,6 +2,7 @@ package restful
import (
"io"
+ "net/http"
"sort"
"testing"
)
@@ -209,4 +210,42 @@ func TestSortableRouteCandidates(t *testing.T) {
}
}
+func TestDetectRouteReturns404IfNoRoutePassesConditions(t *testing.T) {
+ called := false
+ shouldNotBeCalledButWas := false
+
+ routes := []Route{
+ new(RouteBuilder).To(dummy).
+ If(func(req *http.Request) bool { return false }).
+ Build(),
+
+ // check that condition functions are called in order
+ new(RouteBuilder).
+ To(dummy).
+ If(func(req *http.Request) bool { return true }).
+ If(func(req *http.Request) bool { called = true; return false }).
+ Build(),
+
+ // check that condition functions short circuit
+ new(RouteBuilder).
+ To(dummy).
+ If(func(req *http.Request) bool { return false }).
+ If(func(req *http.Request) bool { shouldNotBeCalledButWas = true; return false }).
+ Build(),
+ }
+
+ _, err := RouterJSR311{}.detectRoute(routes, (*http.Request)(nil))
+ if se := err.(ServiceError); se.Code != 404 {
+ t.Fatalf("expected 404, got %d", se.Code)
+ }
+
+ if !called {
+ t.Fatal("expected condition function to get called, but it wasn't")
+ }
+
+ if shouldNotBeCalledButWas {
+ t.Fatal("expected condition function to not be called, but it was")
+ }
+}
+
func dummy(req *Request, resp *Response) { io.WriteString(resp.ResponseWriter, "dummy") }
diff --git a/vendor/github.com/emicklei/go-restful/log/log.go b/vendor/github.com/emicklei/go-restful/log/log.go
index f70d89524..6cd44c7a5 100644
--- a/vendor/github.com/emicklei/go-restful/log/log.go
+++ b/vendor/github.com/emicklei/go-restful/log/log.go
@@ -5,7 +5,7 @@ import (
"os"
)
-// Logger corresponds to a minimal subset of the interface satisfied by stdlib log.Logger
+// StdLogger corresponds to a minimal subset of the interface satisfied by stdlib log.Logger
type StdLogger interface {
Print(v ...interface{})
Printf(format string, v ...interface{})
@@ -18,14 +18,17 @@ func init() {
SetLogger(stdlog.New(os.Stderr, "[restful] ", stdlog.LstdFlags|stdlog.Lshortfile))
}
+// SetLogger sets the logger for this package
func SetLogger(customLogger StdLogger) {
Logger = customLogger
}
+// Print delegates to the Logger
func Print(v ...interface{}) {
Logger.Print(v...)
}
+// Printf delegates to the Logger
func Printf(format string, v ...interface{}) {
Logger.Printf(format, v...)
}
diff --git a/vendor/github.com/emicklei/go-restful/logger.go b/vendor/github.com/emicklei/go-restful/logger.go
index 3f1c4db86..6595df002 100644
--- a/vendor/github.com/emicklei/go-restful/logger.go
+++ b/vendor/github.com/emicklei/go-restful/logger.go
@@ -21,7 +21,7 @@ func TraceLogger(logger log.StdLogger) {
EnableTracing(logger != nil)
}
-// expose the setter for the global logger on the top-level package
+// SetLogger exposes the setter for the global logger on the top-level package
func SetLogger(customLogger log.StdLogger) {
log.SetLogger(customLogger)
}
diff --git a/vendor/github.com/emicklei/go-restful/mime.go b/vendor/github.com/emicklei/go-restful/mime.go
new file mode 100644
index 000000000..d7ea2b615
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/mime.go
@@ -0,0 +1,45 @@
+package restful
+
+import (
+ "strconv"
+ "strings"
+)
+
+type mime struct {
+ media string
+ quality float64
+}
+
+// insertMime adds a mime to a list and keeps it sorted by quality.
+func insertMime(l []mime, e mime) []mime {
+ for i, each := range l {
+ // if current mime has lower quality then insert before
+ if e.quality > each.quality {
+ left := append([]mime{}, l[0:i]...)
+ return append(append(left, e), l[i:]...)
+ }
+ }
+ return append(l, e)
+}
+
+// sortedMimes returns a list of mimes sorted (desc) by their specified quality.
+func sortedMimes(accept string) (sorted []mime) {
+ for _, each := range strings.Split(accept, ",") {
+ typeAndQuality := strings.Split(strings.Trim(each, " "), ";")
+ if len(typeAndQuality) == 1 {
+ sorted = insertMime(sorted, mime{typeAndQuality[0], 1.0})
+ } else {
+ // take factor
+ parts := strings.Split(typeAndQuality[1], "=")
+ if len(parts) == 2 {
+ f, err := strconv.ParseFloat(parts[1], 64)
+ if err != nil {
+ traceLogger.Printf("unable to parse quality in %s, %v", each, err)
+ } else {
+ sorted = insertMime(sorted, mime{typeAndQuality[0], f})
+ }
+ }
+ }
+ }
+ return
+}
diff --git a/vendor/github.com/emicklei/go-restful/mime_test.go b/vendor/github.com/emicklei/go-restful/mime_test.go
new file mode 100644
index 000000000..a910bb100
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/mime_test.go
@@ -0,0 +1,17 @@
+package restful
+
+import (
+ "fmt"
+ "testing"
+)
+
+// go test -v -test.run TestSortMimes ...restful
+func TestSortMimes(t *testing.T) {
+ accept := "text/html; q=0.8, text/plain, image/gif, */*; q=0.01, image/jpeg"
+ result := sortedMimes(accept)
+ got := fmt.Sprintf("%v", result)
+ want := "[{text/plain 1} {image/gif 1} {image/jpeg 1} {text/html 0.8} {*/* 0.01}]"
+ if got != want {
+ t.Errorf("bad sort order of mime types:%s", got)
+ }
+}
diff --git a/vendor/github.com/emicklei/go-restful/options_filter.go b/vendor/github.com/emicklei/go-restful/options_filter.go
index 4514eadcf..5c1b34251 100644
--- a/vendor/github.com/emicklei/go-restful/options_filter.go
+++ b/vendor/github.com/emicklei/go-restful/options_filter.go
@@ -15,7 +15,15 @@ func (c *Container) OPTIONSFilter(req *Request, resp *Response, chain *FilterCha
chain.ProcessFilter(req, resp)
return
}
- resp.AddHeader(HEADER_Allow, strings.Join(c.computeAllowedMethods(req), ","))
+
+ archs := req.Request.Header.Get(HEADER_AccessControlRequestHeaders)
+ methods := strings.Join(c.computeAllowedMethods(req), ",")
+ origin := req.Request.Header.Get(HEADER_Origin)
+
+ resp.AddHeader(HEADER_Allow, methods)
+ resp.AddHeader(HEADER_AccessControlAllowOrigin, origin)
+ resp.AddHeader(HEADER_AccessControlAllowHeaders, archs)
+ resp.AddHeader(HEADER_AccessControlAllowMethods, methods)
}
// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method
diff --git a/vendor/github.com/emicklei/go-restful/request.go b/vendor/github.com/emicklei/go-restful/request.go
index 988adc984..8c23af12c 100644
--- a/vendor/github.com/emicklei/go-restful/request.go
+++ b/vendor/github.com/emicklei/go-restful/request.go
@@ -5,20 +5,15 @@ package restful
// that can be found in the LICENSE file.
import (
- "bytes"
"compress/zlib"
- "io/ioutil"
"net/http"
)
var defaultRequestContentType string
-var doCacheReadEntityBytes = true
-
// Request is a wrapper for a http Request that provides convenience methods
type Request struct {
Request *http.Request
- bodyContent *[]byte // to cache the request body for multiple reads of ReadEntity
pathParameters map[string]string
attributes map[string]interface{} // for storing request-scoped values
selectedRoutePath string // root path + route path that matched the request, e.g. /meetings/{id}/attendees
@@ -41,12 +36,6 @@ func DefaultRequestContentType(mime string) {
defaultRequestContentType = mime
}
-// SetCacheReadEntity controls whether the response data ([]byte) is cached such that ReadEntity is repeatable.
-// Default is true (due to backwardcompatibility). For better performance, you should set it to false if you don't need it.
-func SetCacheReadEntity(doCache bool) {
- doCacheReadEntityBytes = doCache
-}
-
// PathParameter accesses the Path parameter value by its name
func (r *Request) PathParameter(name string) string {
return r.pathParameters[name]
@@ -81,18 +70,6 @@ func (r *Request) ReadEntity(entityPointer interface{}) (err error) {
contentType := r.Request.Header.Get(HEADER_ContentType)
contentEncoding := r.Request.Header.Get(HEADER_ContentEncoding)
- // OLD feature, cache the body for reads
- if doCacheReadEntityBytes {
- if r.bodyContent == nil {
- data, err := ioutil.ReadAll(r.Request.Body)
- if err != nil {
- return err
- }
- r.bodyContent = &data
- }
- r.Request.Body = ioutil.NopCloser(bytes.NewReader(*r.bodyContent))
- }
-
// check if the request body needs decompression
if ENCODING_GZIP == contentEncoding {
gzipReader := currentCompressorProvider.AcquireGzipReader()
@@ -107,10 +84,15 @@ func (r *Request) ReadEntity(entityPointer interface{}) (err error) {
r.Request.Body = zlibReader
}
- // lookup the EntityReader
- entityReader, ok := entityAccessRegistry.AccessorAt(contentType)
+ // lookup the EntityReader, use defaultRequestContentType if needed and provided
+ entityReader, ok := entityAccessRegistry.accessorAt(contentType)
if !ok {
- return NewError(http.StatusBadRequest, "Unable to unmarshal content of type:"+contentType)
+ if len(defaultRequestContentType) != 0 {
+ entityReader, ok = entityAccessRegistry.accessorAt(defaultRequestContentType)
+ }
+ if !ok {
+ return NewError(http.StatusBadRequest, "Unable to unmarshal content of type:"+contentType)
+ }
}
return entityReader.Read(r, entityPointer)
}
diff --git a/vendor/github.com/emicklei/go-restful/request_test.go b/vendor/github.com/emicklei/go-restful/request_test.go
index 72f078f92..31f509659 100644
--- a/vendor/github.com/emicklei/go-restful/request_test.go
+++ b/vendor/github.com/emicklei/go-restful/request_test.go
@@ -29,38 +29,6 @@ type Sample struct {
Value string
}
-func TestReadEntityXmlCached(t *testing.T) {
- SetCacheReadEntity(true)
- bodyReader := strings.NewReader("<Sample><Value>42</Value></Sample>")
- httpRequest, _ := http.NewRequest("GET", "/test", bodyReader)
- httpRequest.Header.Set("Content-Type", "application/xml")
- request := &Request{Request: httpRequest}
- sam := new(Sample)
- request.ReadEntity(sam)
- if sam.Value != "42" {
- t.Fatal("read failed")
- }
- if request.bodyContent == nil {
- t.Fatal("no expected cached bytes found")
- }
-}
-
-func TestReadEntityXmlNonCached(t *testing.T) {
- SetCacheReadEntity(false)
- bodyReader := strings.NewReader("<Sample><Value>42</Value></Sample>")
- httpRequest, _ := http.NewRequest("GET", "/test", bodyReader)
- httpRequest.Header.Set("Content-Type", "application/xml")
- request := &Request{Request: httpRequest}
- sam := new(Sample)
- request.ReadEntity(sam)
- if sam.Value != "42" {
- t.Fatal("read failed")
- }
- if request.bodyContent != nil {
- t.Fatal("unexpected cached bytes found")
- }
-}
-
func TestReadEntityJson(t *testing.T) {
bodyReader := strings.NewReader(`{"Value" : "42"}`)
httpRequest, _ := http.NewRequest("GET", "/test", bodyReader)
@@ -86,37 +54,6 @@ func TestReadEntityJsonCharset(t *testing.T) {
}
func TestReadEntityJsonNumber(t *testing.T) {
- SetCacheReadEntity(true)
- bodyReader := strings.NewReader(`{"Value" : 4899710515899924123}`)
- httpRequest, _ := http.NewRequest("GET", "/test", bodyReader)
- httpRequest.Header.Set("Content-Type", "application/json")
- request := &Request{Request: httpRequest}
- any := make(Anything)
- request.ReadEntity(&any)
- number, ok := any["Value"].(json.Number)
- if !ok {
- t.Fatal("read failed")
- }
- vint, err := number.Int64()
- if err != nil {
- t.Fatal("convert failed")
- }
- if vint != 4899710515899924123 {
- t.Fatal("read failed")
- }
- vfloat, err := number.Float64()
- if err != nil {
- t.Fatal("convert failed")
- }
- // match the default behaviour
- vstring := strconv.FormatFloat(vfloat, 'e', 15, 64)
- if vstring != "4.899710515899924e+18" {
- t.Fatal("convert float64 failed")
- }
-}
-
-func TestReadEntityJsonNumberNonCached(t *testing.T) {
- SetCacheReadEntity(false)
bodyReader := strings.NewReader(`{"Value" : 4899710515899924123}`)
httpRequest, _ := http.NewRequest("GET", "/test", bodyReader)
httpRequest.Header.Set("Content-Type", "application/json")
diff --git a/vendor/github.com/emicklei/go-restful/response.go b/vendor/github.com/emicklei/go-restful/response.go
index 3798f18c8..4d987d130 100644
--- a/vendor/github.com/emicklei/go-restful/response.go
+++ b/vendor/github.com/emicklei/go-restful/response.go
@@ -5,12 +5,13 @@ package restful
// that can be found in the LICENSE file.
import (
+ "bufio"
"errors"
+ "net"
"net/http"
- "strings"
)
-// DEPRECATED, use DefaultResponseContentType(mime)
+// DefaultResponseMimeType is DEPRECATED, use DefaultResponseContentType(mime)
var DefaultResponseMimeType string
//PrettyPrintResponses controls the indentation feature of XML and JSON serialization
@@ -20,19 +21,22 @@ var PrettyPrintResponses = true
// It provides several convenience methods to prepare and write response content.
type Response struct {
http.ResponseWriter
- requestAccept string // mime-type what the Http Request says it wants to receive
- routeProduces []string // mime-types what the Route says it can produce
- statusCode int // HTTP status code that has been written explicity (if zero then net/http has written 200)
- contentLength int // number of bytes written for the response body
- prettyPrint bool // controls the indentation feature of XML and JSON serialization. It is initialized using var PrettyPrintResponses.
- err error // err property is kept when WriteError is called
+ requestAccept string // mime-type what the Http Request says it wants to receive
+ routeProduces []string // mime-types what the Route says it can produce
+ statusCode int // HTTP status code that has been written explicitly (if zero then net/http has written 200)
+ contentLength int // number of bytes written for the response body
+ prettyPrint bool // controls the indentation feature of XML and JSON serialization. It is initialized using var PrettyPrintResponses.
+ err error // err property is kept when WriteError is called
+ hijacker http.Hijacker // if underlying ResponseWriter supports it
}
-// Creates a new response based on a http ResponseWriter.
+// NewResponse creates a new response based on a http ResponseWriter.
func NewResponse(httpWriter http.ResponseWriter) *Response {
- return &Response{httpWriter, "", []string{}, http.StatusOK, 0, PrettyPrintResponses, nil} // empty content-types
+ hijacker, _ := httpWriter.(http.Hijacker)
+ return &Response{ResponseWriter: httpWriter, routeProduces: []string{}, statusCode: http.StatusOK, prettyPrint: PrettyPrintResponses, hijacker: hijacker}
}
+// DefaultResponseContentType sets a default.
// If Accept header matching fails, fall back to this type.
// Valid values are restful.MIME_JSON and restful.MIME_XML
// Example:
@@ -48,6 +52,16 @@ func (r Response) InternalServerError() Response {
return r
}
+// Hijack implements the http.Hijacker interface. This expands
+// the Response to fulfill http.Hijacker if the underlying
+// http.ResponseWriter supports it.
+func (r *Response) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ if r.hijacker == nil {
+ return nil, nil, errors.New("http.Hijacker not implemented by underlying http.ResponseWriter")
+ }
+ return r.hijacker.Hijack()
+}
+
// PrettyPrint changes whether this response must produce pretty (line-by-line, indented) JSON or XML output.
func (r *Response) PrettyPrint(bePretty bool) {
r.prettyPrint = bePretty
@@ -68,38 +82,39 @@ func (r *Response) SetRequestAccepts(mime string) {
// can write according to what the request wants (Accept) and what the Route can produce or what the restful defaults say.
// If called before WriteEntity and WriteHeader then a false return value can be used to write a 406: Not Acceptable.
func (r *Response) EntityWriter() (EntityReaderWriter, bool) {
- for _, qualifiedMime := range strings.Split(r.requestAccept, ",") {
- mime := strings.Trim(strings.Split(qualifiedMime, ";")[0], " ")
- if 0 == len(mime) || mime == "*/*" {
- for _, each := range r.routeProduces {
- if MIME_JSON == each {
- return entityAccessRegistry.AccessorAt(MIME_JSON)
- }
- if MIME_XML == each {
- return entityAccessRegistry.AccessorAt(MIME_XML)
+ sorted := sortedMimes(r.requestAccept)
+ for _, eachAccept := range sorted {
+ for _, eachProduce := range r.routeProduces {
+ if eachProduce == eachAccept.media {
+ if w, ok := entityAccessRegistry.accessorAt(eachAccept.media); ok {
+ return w, true
}
}
- } else { // mime is not blank; see if we have a match in Produces
+ }
+ if eachAccept.media == "*/*" {
for _, each := range r.routeProduces {
- if mime == each {
- if MIME_JSON == each {
- return entityAccessRegistry.AccessorAt(MIME_JSON)
- }
- if MIME_XML == each {
- return entityAccessRegistry.AccessorAt(MIME_XML)
- }
+ if w, ok := entityAccessRegistry.accessorAt(each); ok {
+ return w, true
}
}
}
}
- writer, ok := entityAccessRegistry.AccessorAt(r.requestAccept)
+ // if requestAccept is empty
+ writer, ok := entityAccessRegistry.accessorAt(r.requestAccept)
if !ok {
// if not registered then fallback to the defaults (if set)
if DefaultResponseMimeType == MIME_JSON {
- return entityAccessRegistry.AccessorAt(MIME_JSON)
+ return entityAccessRegistry.accessorAt(MIME_JSON)
}
if DefaultResponseMimeType == MIME_XML {
- return entityAccessRegistry.AccessorAt(MIME_XML)
+ return entityAccessRegistry.accessorAt(MIME_XML)
+ }
+ // Fallback to whatever the route says it can produce.
+ // https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+ for _, each := range r.routeProduces {
+ if w, ok := entityAccessRegistry.accessorAt(each); ok {
+ return w, true
+ }
}
if trace {
traceLogger.Printf("no registered EntityReaderWriter found for %s", r.requestAccept)
@@ -130,25 +145,25 @@ func (r *Response) WriteHeaderAndEntity(status int, value interface{}) error {
}
// WriteAsXml is a convenience method for writing a value in xml (requires Xml tags on the value)
-// It uses the standard encoding/xml package for marshalling the valuel ; not using a registered EntityReaderWriter.
+// It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter.
func (r *Response) WriteAsXml(value interface{}) error {
return writeXML(r, http.StatusOK, MIME_XML, value)
}
// WriteHeaderAndXml is a convenience method for writing a status and value in xml (requires Xml tags on the value)
-// It uses the standard encoding/xml package for marshalling the valuel ; not using a registered EntityReaderWriter.
+// It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter.
func (r *Response) WriteHeaderAndXml(status int, value interface{}) error {
return writeXML(r, status, MIME_XML, value)
}
// WriteAsJson is a convenience method for writing a value in json.
-// It uses the standard encoding/json package for marshalling the valuel ; not using a registered EntityReaderWriter.
+// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter.
func (r *Response) WriteAsJson(value interface{}) error {
return writeJSON(r, http.StatusOK, MIME_JSON, value)
}
// WriteJson is a convenience method for writing a value in Json with a given Content-Type.
-// It uses the standard encoding/json package for marshalling the valuel ; not using a registered EntityReaderWriter.
+// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter.
func (r *Response) WriteJson(value interface{}, contentType string) error {
return writeJSON(r, http.StatusOK, contentType, value)
}
@@ -184,6 +199,15 @@ func (r *Response) WriteErrorString(httpStatus int, errorReason string) error {
return nil
}
+// Flush implements http.Flusher interface, which sends any buffered data to the client.
+func (r *Response) Flush() {
+ if f, ok := r.ResponseWriter.(http.Flusher); ok {
+ f.Flush()
+ } else if trace {
+ traceLogger.Printf("ResponseWriter %v doesn't support Flush", r)
+ }
+}
+
// WriteHeader is overridden to remember the Status Code that has been written.
// Changes to the Header of the response have no effect after this.
func (r *Response) WriteHeader(httpStatus int) {
diff --git a/vendor/github.com/emicklei/go-restful/response_test.go b/vendor/github.com/emicklei/go-restful/response_test.go
index c8354f8ae..0587c40b4 100644
--- a/vendor/github.com/emicklei/go-restful/response_test.go
+++ b/vendor/github.com/emicklei/go-restful/response_test.go
@@ -10,7 +10,7 @@ import (
func TestWriteHeader(t *testing.T) {
httpWriter := httptest.NewRecorder()
- resp := Response{httpWriter, "*/*", []string{"*/*"}, 0, 0, true, nil}
+ resp := Response{ResponseWriter: httpWriter, requestAccept: "*/*", routeProduces: []string{"*/*"}, prettyPrint: true}
resp.WriteHeader(123)
if resp.StatusCode() != 123 {
t.Errorf("Unexpected status code:%d", resp.StatusCode())
@@ -19,7 +19,7 @@ func TestWriteHeader(t *testing.T) {
func TestNoWriteHeader(t *testing.T) {
httpWriter := httptest.NewRecorder()
- resp := Response{httpWriter, "*/*", []string{"*/*"}, 0, 0, true, nil}
+ resp := Response{ResponseWriter: httpWriter, requestAccept: "*/*", routeProduces: []string{"*/*"}, prettyPrint: true}
if resp.StatusCode() != http.StatusOK {
t.Errorf("Unexpected status code:%d", resp.StatusCode())
}
@@ -32,7 +32,7 @@ type food struct {
// go test -v -test.run TestMeasureContentLengthXml ...restful
func TestMeasureContentLengthXml(t *testing.T) {
httpWriter := httptest.NewRecorder()
- resp := Response{httpWriter, "*/*", []string{"*/*"}, 0, 0, true, nil}
+ resp := Response{ResponseWriter: httpWriter, requestAccept: "*/*", routeProduces: []string{"*/*"}, prettyPrint: true}
resp.WriteAsXml(food{"apple"})
if resp.ContentLength() != 76 {
t.Errorf("Incorrect measured length:%d", resp.ContentLength())
@@ -42,7 +42,7 @@ func TestMeasureContentLengthXml(t *testing.T) {
// go test -v -test.run TestMeasureContentLengthJson ...restful
func TestMeasureContentLengthJson(t *testing.T) {
httpWriter := httptest.NewRecorder()
- resp := Response{httpWriter, "*/*", []string{"*/*"}, 0, 0, true, nil}
+ resp := Response{ResponseWriter: httpWriter, requestAccept: "*/*", routeProduces: []string{"*/*"}, prettyPrint: true}
resp.WriteAsJson(food{"apple"})
if resp.ContentLength() != 22 {
t.Errorf("Incorrect measured length:%d", resp.ContentLength())
@@ -52,7 +52,7 @@ func TestMeasureContentLengthJson(t *testing.T) {
// go test -v -test.run TestMeasureContentLengthJsonNotPretty ...restful
func TestMeasureContentLengthJsonNotPretty(t *testing.T) {
httpWriter := httptest.NewRecorder()
- resp := Response{httpWriter, "*/*", []string{"*/*"}, 0, 0, false, nil}
+ resp := Response{ResponseWriter: httpWriter, requestAccept: "*/*", routeProduces: []string{"*/*"}}
resp.WriteAsJson(food{"apple"})
if resp.ContentLength() != 17 { // 16+1 using the Encoder directly yields another /n
t.Errorf("Incorrect measured length:%d", resp.ContentLength())
@@ -62,7 +62,7 @@ func TestMeasureContentLengthJsonNotPretty(t *testing.T) {
// go test -v -test.run TestMeasureContentLengthWriteErrorString ...restful
func TestMeasureContentLengthWriteErrorString(t *testing.T) {
httpWriter := httptest.NewRecorder()
- resp := Response{httpWriter, "*/*", []string{"*/*"}, 0, 0, true, nil}
+ resp := Response{ResponseWriter: httpWriter, requestAccept: "*/*", routeProduces: []string{"*/*"}, prettyPrint: true}
resp.WriteErrorString(404, "Invalid")
if resp.ContentLength() != len("Invalid") {
t.Errorf("Incorrect measured length:%d", resp.ContentLength())
@@ -80,7 +80,7 @@ func TestStatusIsPassedToResponse(t *testing.T) {
{write: 400, read: 400},
} {
httpWriter := httptest.NewRecorder()
- resp := Response{httpWriter, "*/*", []string{"*/*"}, 0, 0, true, nil}
+ resp := Response{ResponseWriter: httpWriter, requestAccept: "*/*", routeProduces: []string{"*/*"}, prettyPrint: true}
resp.WriteHeader(each.write)
if got, want := httpWriter.Code, each.read; got != want {
t.Errorf("got %v want %v", got, want)
@@ -91,11 +91,11 @@ func TestStatusIsPassedToResponse(t *testing.T) {
// go test -v -test.run TestStatusCreatedAndContentTypeJson_Issue54 ...restful
func TestStatusCreatedAndContentTypeJson_Issue54(t *testing.T) {
httpWriter := httptest.NewRecorder()
- resp := Response{httpWriter, "application/json", []string{"application/json"}, 0, 0, true, nil}
+ resp := Response{ResponseWriter: httpWriter, requestAccept: "application/json", routeProduces: []string{"application/json"}, prettyPrint: true}
resp.WriteHeader(201)
resp.WriteAsJson(food{"Juicy"})
if httpWriter.HeaderMap.Get("Content-Type") != "application/json" {
- t.Errorf("Expected content type json but got:%d", httpWriter.HeaderMap.Get("Content-Type"))
+ t.Errorf("Expected content type json but got:%s", httpWriter.HeaderMap.Get("Content-Type"))
}
if httpWriter.Code != 201 {
t.Errorf("Expected status 201 but got:%d", httpWriter.Code)
@@ -113,7 +113,7 @@ func (e errorOnWriteRecorder) Write(bytes []byte) (int, error) {
// go test -v -test.run TestLastWriteErrorCaught ...restful
func TestLastWriteErrorCaught(t *testing.T) {
httpWriter := errorOnWriteRecorder{httptest.NewRecorder()}
- resp := Response{httpWriter, "application/json", []string{"application/json"}, 0, 0, true, nil}
+ resp := Response{ResponseWriter: httpWriter, requestAccept: "application/json", routeProduces: []string{"application/json"}, prettyPrint: true}
err := resp.WriteAsJson(food{"Juicy"})
if err.Error() != "fail" {
t.Errorf("Unexpected error message:%v", err)
@@ -124,7 +124,7 @@ func TestLastWriteErrorCaught(t *testing.T) {
func TestAcceptStarStar_Issue83(t *testing.T) {
httpWriter := httptest.NewRecorder()
// Accept Produces
- resp := Response{httpWriter, "application/bogus,*/*;q=0.8", []string{"application/json"}, 0, 0, true, nil}
+ resp := Response{ResponseWriter: httpWriter, requestAccept: "application/bogus,*/*;q=0.8", routeProduces: []string{"application/json"}, prettyPrint: true}
resp.WriteEntity(food{"Juicy"})
ct := httpWriter.Header().Get("Content-Type")
if "application/json" != ct {
@@ -136,7 +136,7 @@ func TestAcceptStarStar_Issue83(t *testing.T) {
func TestAcceptSkipStarStar_Issue83(t *testing.T) {
httpWriter := httptest.NewRecorder()
// Accept Produces
- resp := Response{httpWriter, " application/xml ,*/* ; q=0.8", []string{"application/json", "application/xml"}, 0, 0, true, nil}
+ resp := Response{ResponseWriter: httpWriter, requestAccept: " application/xml ,*/* ; q=0.8", routeProduces: []string{"application/json", "application/xml"}, prettyPrint: true}
resp.WriteEntity(food{"Juicy"})
ct := httpWriter.Header().Get("Content-Type")
if "application/xml" != ct {
@@ -148,7 +148,7 @@ func TestAcceptSkipStarStar_Issue83(t *testing.T) {
func TestAcceptXmlBeforeStarStar_Issue83(t *testing.T) {
httpWriter := httptest.NewRecorder()
// Accept Produces
- resp := Response{httpWriter, "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", []string{"application/json"}, 0, 0, true, nil}
+ resp := Response{ResponseWriter: httpWriter, requestAccept: "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", routeProduces: []string{"application/json"}, prettyPrint: true}
resp.WriteEntity(food{"Juicy"})
ct := httpWriter.Header().Get("Content-Type")
if "application/json" != ct {
@@ -159,7 +159,7 @@ func TestAcceptXmlBeforeStarStar_Issue83(t *testing.T) {
// go test -v -test.run TestWriteHeaderNoContent_Issue124 ...restful
func TestWriteHeaderNoContent_Issue124(t *testing.T) {
httpWriter := httptest.NewRecorder()
- resp := Response{httpWriter, "text/plain", []string{"text/plain"}, 0, 0, true, nil}
+ resp := Response{ResponseWriter: httpWriter, requestAccept: "text/plain", routeProduces: []string{"text/plain"}, prettyPrint: true}
resp.WriteHeader(http.StatusNoContent)
if httpWriter.Code != http.StatusNoContent {
t.Errorf("got %d want %d", httpWriter.Code, http.StatusNoContent)
@@ -169,7 +169,7 @@ func TestWriteHeaderNoContent_Issue124(t *testing.T) {
// go test -v -test.run TestStatusCreatedAndContentTypeJson_Issue163 ...restful
func TestStatusCreatedAndContentTypeJson_Issue163(t *testing.T) {
httpWriter := httptest.NewRecorder()
- resp := Response{httpWriter, "application/json", []string{"application/json"}, 0, 0, true, nil}
+ resp := Response{ResponseWriter: httpWriter, requestAccept: "application/json", routeProduces: []string{"application/json"}, prettyPrint: true}
resp.WriteHeader(http.StatusNotModified)
if httpWriter.Code != http.StatusNotModified {
t.Errorf("Got %d want %d", httpWriter.Code, http.StatusNotModified)
@@ -178,7 +178,7 @@ func TestStatusCreatedAndContentTypeJson_Issue163(t *testing.T) {
func TestWriteHeaderAndEntity_Issue235(t *testing.T) {
httpWriter := httptest.NewRecorder()
- resp := Response{httpWriter, "application/json", []string{"application/json"}, 0, 0, true, nil}
+ resp := Response{ResponseWriter: httpWriter, requestAccept: "application/json", routeProduces: []string{"application/json"}, prettyPrint: true}
var pong = struct {
Foo string `json:"foo"`
}{Foo: "123"}
@@ -194,9 +194,18 @@ func TestWriteHeaderAndEntity_Issue235(t *testing.T) {
}
}
-func TestWriteEntityNotAcceptable(t *testing.T) {
+func TestWriteEntityNoAcceptMatchWithProduces(t *testing.T) {
httpWriter := httptest.NewRecorder()
- resp := Response{httpWriter, "application/bogus", []string{"application/json"}, 0, 0, true, nil}
+ resp := Response{ResponseWriter: httpWriter, requestAccept: "application/bogus", routeProduces: []string{"application/json"}, prettyPrint: true}
+ resp.WriteEntity("done")
+ if httpWriter.Code != http.StatusOK {
+ t.Errorf("got %d want %d", httpWriter.Code, http.StatusOK)
+ }
+}
+
+func TestWriteEntityNoAcceptMatchNoProduces(t *testing.T) {
+ httpWriter := httptest.NewRecorder()
+ resp := Response{ResponseWriter: httpWriter, requestAccept: "application/bogus", routeProduces: []string{}, prettyPrint: true}
resp.WriteEntity("done")
if httpWriter.Code != http.StatusNotAcceptable {
t.Errorf("got %d want %d", httpWriter.Code, http.StatusNotAcceptable)
diff --git a/vendor/github.com/emicklei/go-restful/route.go b/vendor/github.com/emicklei/go-restful/route.go
index f54e8622e..9d5b156e0 100644
--- a/vendor/github.com/emicklei/go-restful/route.go
+++ b/vendor/github.com/emicklei/go-restful/route.go
@@ -13,6 +13,11 @@ import (
// RouteFunction declares the signature of a function that can be bound to a Route.
type RouteFunction func(*Request, *Response)
+// RouteSelectionConditionFunction declares the signature of a function that
+// can be used to add extra conditional logic when selecting whether the route
+// matches the HTTP request.
+type RouteSelectionConditionFunction func(httpRequest *http.Request) bool
+
// Route binds a HTTP Method,Path,Consumes combination to a RouteFunction.
type Route struct {
Method string
@@ -21,6 +26,7 @@ type Route struct {
Path string // webservice root path + described path
Function RouteFunction
Filters []FilterFunction
+ If []RouteSelectionConditionFunction
// cached values for dispatching
relativePath string
@@ -34,6 +40,9 @@ type Route struct {
ParameterDocs []*Parameter
ResponseErrors map[int]ResponseError
ReadSample, WriteSample interface{} // structs that model an example request or response payload
+
+ // Extra information used to store custom information about the route.
+ Metadata map[string]interface{}
}
// Initialize for Route
@@ -97,7 +106,7 @@ func (r Route) matchesContentType(mimeTypes string) bool {
}
if len(mimeTypes) == 0 {
- // idempotent methods with (most-likely or garanteed) empty content match missing Content-Type
+ // idempotent methods with (most-likely or guaranteed) empty content match missing Content-Type
m := r.Method
if m == "GET" || m == "HEAD" || m == "OPTIONS" || m == "DELETE" || m == "TRACE" {
return true
diff --git a/vendor/github.com/emicklei/go-restful/route_builder.go b/vendor/github.com/emicklei/go-restful/route_builder.go
index b49b7c74d..83db02b7c 100644
--- a/vendor/github.com/emicklei/go-restful/route_builder.go
+++ b/vendor/github.com/emicklei/go-restful/route_builder.go
@@ -5,10 +5,12 @@ package restful
// that can be found in the LICENSE file.
import (
+ "fmt"
"os"
"reflect"
"runtime"
"strings"
+ "sync/atomic"
"github.com/emicklei/go-restful/log"
)
@@ -22,6 +24,10 @@ type RouteBuilder struct {
httpMethod string // required
function RouteFunction // required
filters []FilterFunction
+ conditions []RouteSelectionConditionFunction
+
+ typeNameHandleFunc TypeNameHandleFunction // required
+
// documentation
doc string
notes string
@@ -29,6 +35,7 @@ type RouteBuilder struct {
readSample, writeSample interface{}
parameters []*Parameter
errorMap map[int]ResponseError
+ metadata map[string]interface{}
}
// Do evaluates each argument with the RouteBuilder itself.
@@ -83,7 +90,7 @@ func (b *RouteBuilder) Doc(documentation string) *RouteBuilder {
return b
}
-// A verbose explanation of the operation behavior. Optional.
+// Notes is a verbose explanation of the operation behavior. Optional.
func (b *RouteBuilder) Notes(notes string) *RouteBuilder {
b.notes = notes
return b
@@ -92,8 +99,13 @@ func (b *RouteBuilder) Notes(notes string) *RouteBuilder {
// Reads tells what resource type will be read from the request payload. Optional.
// A parameter of type "body" is added ,required is set to true and the dataType is set to the qualified name of the sample's type.
func (b *RouteBuilder) Reads(sample interface{}) *RouteBuilder {
+ fn := b.typeNameHandleFunc
+ if fn == nil {
+ fn = reflectTypeName
+ }
+ typeAsName := fn(sample)
+
b.readSample = sample
- typeAsName := reflect.TypeOf(sample).String()
bodyParameter := &Parameter{&ParameterData{Name: "body"}}
bodyParameter.beBody()
bodyParameter.Required(true)
@@ -128,7 +140,7 @@ func (b *RouteBuilder) Param(parameter *Parameter) *RouteBuilder {
return b
}
-// Operation allows you to document what the acutal method/function call is of the Route.
+// Operation allows you to document what the actual method/function call is of the Route.
// Unless called, the operation name is derived from the RouteFunction set using To(..).
func (b *RouteBuilder) Operation(name string) *RouteBuilder {
b.operation = name
@@ -145,9 +157,10 @@ func (b *RouteBuilder) ReturnsError(code int, message string, model interface{})
// The model parameter is optional ; either pass a struct instance or use nil if not applicable.
func (b *RouteBuilder) Returns(code int, message string, model interface{}) *RouteBuilder {
err := ResponseError{
- Code: code,
- Message: message,
- Model: model,
+ Code: code,
+ Message: message,
+ Model: model,
+ IsDefault: false,
}
// lazy init because there is no NewRouteBuilder (yet)
if b.errorMap == nil {
@@ -157,10 +170,36 @@ func (b *RouteBuilder) Returns(code int, message string, model interface{}) *Rou
return b
}
+// DefaultReturns is a special Returns call that sets the default of the response ; the code is zero.
+func (b *RouteBuilder) DefaultReturns(message string, model interface{}) *RouteBuilder {
+ b.Returns(0, message, model)
+ // Modify the ResponseError just added/updated
+ re := b.errorMap[0]
+ // errorMap is initialized
+ b.errorMap[0] = ResponseError{
+ Code: re.Code,
+ Message: re.Message,
+ Model: re.Model,
+ IsDefault: true,
+ }
+ return b
+}
+
+// Metadata adds or updates a key=value pair to the metadata map.
+func (b *RouteBuilder) Metadata(key string, value interface{}) *RouteBuilder {
+ if b.metadata == nil {
+ b.metadata = map[string]interface{}{}
+ }
+ b.metadata[key] = value
+ return b
+}
+
+// ResponseError represents a response; not necessarily an error.
type ResponseError struct {
- Code int
- Message string
- Model interface{}
+ Code int
+ Message string
+ Model interface{}
+ IsDefault bool
}
func (b *RouteBuilder) servicePath(path string) *RouteBuilder {
@@ -174,6 +213,21 @@ func (b *RouteBuilder) Filter(filter FilterFunction) *RouteBuilder {
return b
}
+// If sets a condition function that controls matching the Route based on custom logic.
+// The condition function is provided the HTTP request and should return true if the route
+// should be considered.
+//
+// Efficiency note: the condition function is called before checking the method, produces, and
+// consumes criteria, so that the correct HTTP status code can be returned.
+//
+// Lifecycle note: no filter functions have been called prior to calling the condition function,
+// so the condition function should not depend on any context that might be set up by container
+// or route filters.
+func (b *RouteBuilder) If(condition RouteSelectionConditionFunction) *RouteBuilder {
+ b.conditions = append(b.conditions, condition)
+ return b
+}
+
// If no specific Route path then set to rootPath
// If no specific Produces then set to rootProduces
// If no specific Consumes then set to rootConsumes
@@ -186,6 +240,13 @@ func (b *RouteBuilder) copyDefaults(rootProduces, rootConsumes []string) {
}
}
+// typeNameHandler sets the function that will convert types to strings in the parameter
+// and model definitions.
+func (b *RouteBuilder) typeNameHandler(handler TypeNameHandleFunction) *RouteBuilder {
+ b.typeNameHandleFunc = handler
+ return b
+}
+
// Build creates a new Route using the specification details collected by the RouteBuilder
func (b *RouteBuilder) Build() Route {
pathExpr, err := newPathExpression(b.currentPath)
@@ -209,6 +270,7 @@ func (b *RouteBuilder) Build() Route {
Consumes: b.consumes,
Function: b.function,
Filters: b.filters,
+ If: b.conditions,
relativePath: b.currentPath,
pathExpr: pathExpr,
Doc: b.doc,
@@ -217,7 +279,8 @@ func (b *RouteBuilder) Build() Route {
ParameterDocs: b.parameters,
ResponseErrors: b.errorMap,
ReadSample: b.readSample,
- WriteSample: b.writeSample}
+ WriteSample: b.writeSample,
+ Metadata: b.metadata}
route.postBuild()
return route
}
@@ -226,6 +289,8 @@ func concatPath(path1, path2 string) string {
return strings.TrimRight(path1, "/") + "/" + strings.TrimLeft(path2, "/")
}
+var anonymousFuncCount int32
+
// nameOfFunction returns the short name of the function f for documentation.
// It uses a runtime feature for debugging ; its value may change for later Go versions.
func nameOfFunction(f interface{}) string {
@@ -236,5 +301,10 @@ func nameOfFunction(f interface{}) string {
last = strings.TrimSuffix(last, ")-fm") // Go 1.5
last = strings.TrimSuffix(last, "·fm") // < Go 1.5
last = strings.TrimSuffix(last, "-fm") // Go 1.5
+ if last == "func1" { // this could mean conflicts in API docs
+ val := atomic.AddInt32(&anonymousFuncCount, 1)
+ last = "func" + fmt.Sprintf("%d", val)
+ atomic.StoreInt32(&anonymousFuncCount, val)
+ }
return last
}
diff --git a/vendor/github.com/emicklei/go-restful/route_builder_test.go b/vendor/github.com/emicklei/go-restful/route_builder_test.go
index 56dbe02e4..25881d5eb 100644
--- a/vendor/github.com/emicklei/go-restful/route_builder_test.go
+++ b/vendor/github.com/emicklei/go-restful/route_builder_test.go
@@ -2,6 +2,7 @@ package restful
import (
"testing"
+ "time"
)
func TestRouteBuilder_PathParameter(t *testing.T) {
@@ -41,7 +42,7 @@ func TestRouteBuilder(t *testing.T) {
json := "application/json"
b := new(RouteBuilder)
b.To(dummy)
- b.Path("/routes").Method("HEAD").Consumes(json).Produces(json)
+ b.Path("/routes").Method("HEAD").Consumes(json).Produces(json).Metadata("test", "test-value").DefaultReturns("default", time.Now())
r := b.Build()
if r.Path != "/routes" {
t.Error("path invalid")
@@ -55,4 +56,21 @@ func TestRouteBuilder(t *testing.T) {
if r.Operation != "dummy" {
t.Error("Operation not set")
}
+ if r.Metadata["test"] != "test-value" {
+ t.Errorf("Metadata not set")
+ }
+ if _, ok := r.ResponseErrors[0]; !ok {
+ t.Fatal("expected default response")
+ }
+}
+
+func TestAnonymousFuncNaming(t *testing.T) {
+ f1 := func() {}
+ f2 := func() {}
+ if got, want := nameOfFunction(f1), "func1"; got != want {
+ t.Errorf("got %v want %v", got, want)
+ }
+ if got, want := nameOfFunction(f2), "func2"; got != want {
+ t.Errorf("got %v want %v", got, want)
+ }
}
diff --git a/vendor/github.com/emicklei/go-restful/web_service.go b/vendor/github.com/emicklei/go-restful/web_service.go
index e89be7009..094c0a02a 100644
--- a/vendor/github.com/emicklei/go-restful/web_service.go
+++ b/vendor/github.com/emicklei/go-restful/web_service.go
@@ -1,8 +1,9 @@
package restful
import (
- "fmt"
+ "errors"
"os"
+ "reflect"
"sync"
"github.com/emicklei/go-restful/log"
@@ -24,6 +25,8 @@ type WebService struct {
documentation string
apiVersion string
+ typeNameHandleFunc TypeNameHandleFunction
+
dynamicRoutes bool
// protects 'routes' if dynamic routes are enabled
@@ -34,11 +37,27 @@ func (w *WebService) SetDynamicRoutes(enable bool) {
w.dynamicRoutes = enable
}
+// TypeNameHandleFunction declares functions that can handle translating the name of a sample object
+// into the restful documentation for the service.
+type TypeNameHandleFunction func(sample interface{}) string
+
+// TypeNameHandler sets the function that will convert types to strings in the parameter
+// and model definitions. If not set, the web service will invoke
+// reflect.TypeOf(object).String().
+func (w *WebService) TypeNameHandler(handler TypeNameHandleFunction) *WebService {
+ w.typeNameHandleFunc = handler
+ return w
+}
+
+// reflectTypeName is the default TypeNameHandleFunction and for a given object
+// returns the name that Go identifies it with (e.g. "string" or "v1.Object") via
+// the reflection API.
+func reflectTypeName(sample interface{}) string {
+ return reflect.TypeOf(sample).String()
+}
+
// compilePathExpression ensures that the path is compiled into a RegEx for those routers that need it.
func (w *WebService) compilePathExpression() {
- if len(w.rootPath) == 0 {
- w.Path("/") // lazy initialize path
- }
compiled, err := newPathExpression(w.rootPath)
if err != nil {
log.Printf("[restful] invalid path:%s because:%v", w.rootPath, err)
@@ -54,12 +73,15 @@ func (w *WebService) ApiVersion(apiVersion string) *WebService {
}
// Version returns the API version for documentation purposes.
-func (w WebService) Version() string { return w.apiVersion }
+func (w *WebService) Version() string { return w.apiVersion }
// Path specifies the root URL template path of the WebService.
// All Routes will be relative to this path.
func (w *WebService) Path(root string) *WebService {
w.rootPath = root
+ if len(w.rootPath) == 0 {
+ w.rootPath = "/"
+ }
w.compilePathExpression()
return w
}
@@ -155,21 +177,26 @@ func (w *WebService) Route(builder *RouteBuilder) *WebService {
// RemoveRoute removes the specified route, looks for something that matches 'path' and 'method'
func (w *WebService) RemoveRoute(path, method string) error {
if !w.dynamicRoutes {
- return fmt.Errorf("dynamic routes are not enabled.")
+ return errors.New("dynamic routes are not enabled.")
}
w.routesLock.Lock()
defer w.routesLock.Unlock()
+ newRoutes := make([]Route, (len(w.routes) - 1))
+ current := 0
for ix := range w.routes {
if w.routes[ix].Method == method && w.routes[ix].Path == path {
- w.routes = append(w.routes[:ix], w.routes[ix+1:]...)
+ continue
}
+ newRoutes[current] = w.routes[ix]
+ current = current + 1
}
+ w.routes = newRoutes
return nil
}
// Method creates a new RouteBuilder and initialize its http method
func (w *WebService) Method(httpMethod string) *RouteBuilder {
- return new(RouteBuilder).servicePath(w.rootPath).Method(httpMethod)
+ return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method(httpMethod)
}
// Produces specifies that this WebService can produce one or more MIME types.
@@ -187,7 +214,7 @@ func (w *WebService) Consumes(accepts ...string) *WebService {
}
// Routes returns the Routes associated with this WebService
-func (w WebService) Routes() []Route {
+func (w *WebService) Routes() []Route {
if !w.dynamicRoutes {
return w.routes
}
@@ -202,12 +229,12 @@ func (w WebService) Routes() []Route {
}
// RootPath returns the RootPath associated with this WebService. Default "/"
-func (w WebService) RootPath() string {
+func (w *WebService) RootPath() string {
return w.rootPath
}
-// PathParameters return the path parameter names for (shared amoung its Routes)
-func (w WebService) PathParameters() []*Parameter {
+// PathParameters return the path parameter names for (shared among its Routes)
+func (w *WebService) PathParameters() []*Parameter {
return w.pathParameters
}
@@ -224,7 +251,7 @@ func (w *WebService) Doc(plainText string) *WebService {
}
// Documentation returns it.
-func (w WebService) Documentation() string {
+func (w *WebService) Documentation() string {
return w.documentation
}
@@ -234,30 +261,30 @@ func (w WebService) Documentation() string {
// HEAD is a shortcut for .Method("HEAD").Path(subPath)
func (w *WebService) HEAD(subPath string) *RouteBuilder {
- return new(RouteBuilder).servicePath(w.rootPath).Method("HEAD").Path(subPath)
+ return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("HEAD").Path(subPath)
}
// GET is a shortcut for .Method("GET").Path(subPath)
func (w *WebService) GET(subPath string) *RouteBuilder {
- return new(RouteBuilder).servicePath(w.rootPath).Method("GET").Path(subPath)
+ return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("GET").Path(subPath)
}
// POST is a shortcut for .Method("POST").Path(subPath)
func (w *WebService) POST(subPath string) *RouteBuilder {
- return new(RouteBuilder).servicePath(w.rootPath).Method("POST").Path(subPath)
+ return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("POST").Path(subPath)
}
// PUT is a shortcut for .Method("PUT").Path(subPath)
func (w *WebService) PUT(subPath string) *RouteBuilder {
- return new(RouteBuilder).servicePath(w.rootPath).Method("PUT").Path(subPath)
+ return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("PUT").Path(subPath)
}
// PATCH is a shortcut for .Method("PATCH").Path(subPath)
func (w *WebService) PATCH(subPath string) *RouteBuilder {
- return new(RouteBuilder).servicePath(w.rootPath).Method("PATCH").Path(subPath)
+ return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("PATCH").Path(subPath)
}
// DELETE is a shortcut for .Method("DELETE").Path(subPath)
func (w *WebService) DELETE(subPath string) *RouteBuilder {
- return new(RouteBuilder).servicePath(w.rootPath).Method("DELETE").Path(subPath)
+ return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("DELETE").Path(subPath)
}
diff --git a/vendor/github.com/emicklei/go-restful/web_service_test.go b/vendor/github.com/emicklei/go-restful/web_service_test.go
index 469890434..734938134 100644
--- a/vendor/github.com/emicklei/go-restful/web_service_test.go
+++ b/vendor/github.com/emicklei/go-restful/web_service_test.go
@@ -44,6 +44,8 @@ func TestCapturePanic(t *testing.T) {
httpRequest, _ := http.NewRequest("GET", "http://here.com/fire", nil)
httpRequest.Header.Set("Accept", "*/*")
httpWriter := httptest.NewRecorder()
+ // override the default here
+ DefaultContainer.DoNotRecover(false)
DefaultContainer.dispatch(httpWriter, httpRequest)
if 500 != httpWriter.Code {
t.Error("500 expected on fire")
@@ -110,6 +112,17 @@ func TestContentType415_Issue170(t *testing.T) {
}
}
+func TestNoContentTypePOST(t *testing.T) {
+ tearDown()
+ Add(newPostNoConsumesService())
+ httpRequest, _ := http.NewRequest("POST", "http://here.com/post", nil)
+ httpWriter := httptest.NewRecorder()
+ DefaultContainer.dispatch(httpWriter, httpRequest)
+ if 204 != httpWriter.Code {
+ t.Errorf("Expected 204, got %d", httpWriter.Code)
+ }
+}
+
func TestContentType415_POST_Issue170(t *testing.T) {
tearDown()
Add(newPostOnlyJsonOnlyService())
@@ -171,6 +184,41 @@ func TestRemoveRoute(t *testing.T) {
t.Errorf("got %v, want %v", got, want)
}
}
+func TestRemoveLastRoute(t *testing.T) {
+ tearDown()
+ TraceLogger(testLogger{t})
+ ws := newGetPlainTextOrJsonServiceMultiRoute()
+ Add(ws)
+ httpRequest, _ := http.NewRequest("GET", "http://here.com/get", nil)
+ httpRequest.Header.Set("Accept", "text/plain")
+ httpWriter := httptest.NewRecorder()
+ DefaultContainer.dispatch(httpWriter, httpRequest)
+ if got, want := httpWriter.Code, 200; got != want {
+ t.Errorf("got %v, want %v", got, want)
+ }
+
+ // dynamic apis are disabled, should error and do nothing
+ if err := ws.RemoveRoute("/get", "GET"); err == nil {
+ t.Error("unexpected non-error")
+ }
+
+ httpWriter = httptest.NewRecorder()
+ DefaultContainer.dispatch(httpWriter, httpRequest)
+ if got, want := httpWriter.Code, 200; got != want {
+ t.Errorf("got %v, want %v", got, want)
+ }
+
+ ws.SetDynamicRoutes(true)
+ if err := ws.RemoveRoute("/get", "GET"); err != nil {
+ t.Errorf("unexpected error %v", err)
+ }
+
+ httpWriter = httptest.NewRecorder()
+ DefaultContainer.dispatch(httpWriter, httpRequest)
+ if got, want := httpWriter.Code, 404; got != want {
+ t.Errorf("got %v, want %v", got, want)
+ }
+}
// go test -v -test.run TestContentTypeOctet_Issue170 ...restful
func TestContentTypeOctet_Issue170(t *testing.T) {
@@ -193,6 +241,29 @@ func TestContentTypeOctet_Issue170(t *testing.T) {
}
}
+type exampleBody struct{}
+
+func TestParameterDataTypeDefaults(t *testing.T) {
+ tearDown()
+ ws := new(WebService)
+ route := ws.POST("/post").Reads(&exampleBody{})
+ if route.parameters[0].data.DataType != "*restful.exampleBody" {
+ t.Errorf("body parameter incorrect name: %#v", route.parameters[0].data)
+ }
+}
+
+func TestParameterDataTypeCustomization(t *testing.T) {
+ tearDown()
+ ws := new(WebService)
+ ws.TypeNameHandler(func(sample interface{}) string {
+ return "my.custom.type.name"
+ })
+ route := ws.POST("/post").Reads(&exampleBody{})
+ if route.parameters[0].data.DataType != "my.custom.type.name" {
+ t.Errorf("body parameter incorrect name: %#v", route.parameters[0].data)
+ }
+}
+
func newPanicingService() *WebService {
ws := new(WebService).Path("")
ws.Route(ws.GET("/fire").To(doPanic))
@@ -226,6 +297,14 @@ func newGetPlainTextOrJsonService() *WebService {
return ws
}
+func newGetPlainTextOrJsonServiceMultiRoute() *WebService {
+ ws := new(WebService).Path("")
+ ws.Produces("text/plain", "application/json")
+ ws.Route(ws.GET("/get").To(doNothing))
+ ws.Route(ws.GET("/status").To(doNothing))
+ return ws
+}
+
func newGetConsumingOctetStreamService() *WebService {
ws := new(WebService).Path("")
ws.Consumes("application/octet-stream")
@@ -233,6 +312,12 @@ func newGetConsumingOctetStreamService() *WebService {
return ws
}
+func newPostNoConsumesService() *WebService {
+ ws := new(WebService).Path("")
+ ws.Route(ws.POST("/post").To(return204))
+ return ws
+}
+
func newSelectedRouteTestingService() *WebService {
ws := new(WebService).Path("")
ws.Route(ws.GET(pathGetFriends).To(selectedRouteChecker))
@@ -252,3 +337,7 @@ func doPanic(req *Request, resp *Response) {
func doNothing(req *Request, resp *Response) {
}
+
+func return204(req *Request, resp *Response) {
+ resp.WriteHeader(204)
+}
diff --git a/vendor/github.com/go-openapi/loads/spec.go b/vendor/github.com/go-openapi/loads/spec.go
index 6d967389b..2db5cb558 100644
--- a/vendor/github.com/go-openapi/loads/spec.go
+++ b/vendor/github.com/go-openapi/loads/spec.go
@@ -186,7 +186,7 @@ func (d *Document) Expanded(options ...*spec.ExpandOptions) (*Document, error) {
var expandOptions *spec.ExpandOptions
if len(options) > 0 {
- expandOptions = options[1]
+ expandOptions = options[0]
} else {
expandOptions = &spec.ExpandOptions{
RelativeBase: filepath.Dir(d.specFilePath),
diff --git a/vendor/github.com/go-openapi/loads/spec_test.go b/vendor/github.com/go-openapi/loads/spec_test.go
index 5c5e7ca4e..a3b241d95 100644
--- a/vendor/github.com/go-openapi/loads/spec_test.go
+++ b/vendor/github.com/go-openapi/loads/spec_test.go
@@ -32,6 +32,19 @@ func TestLoadsYAMLContent(t *testing.T) {
}
}
+// for issue 11
+func TestRegressionExpand(t *testing.T) {
+ swaggerFile := "fixtures/yaml/swagger/1/2/3/4/swagger.yaml"
+ document, err := Spec(swaggerFile)
+ assert.NoError(t, err)
+ assert.NotNil(t, document)
+ d, err := document.Expanded()
+ assert.NoError(t, err)
+ assert.NotNil(t, d)
+ b, _ := d.Spec().MarshalJSON()
+ assert.JSONEq(t, expectedExpanded, string(b))
+}
+
func TestFailsInvalidJSON(t *testing.T) {
_, err := Analyzed(json.RawMessage([]byte("{]")), "")
@@ -499,3 +512,132 @@ const PetStore20 = `{
}
}
`
+
+const expectedExpanded = `
+{
+ "produces":[
+ "application/json",
+ "plain/text"
+ ],
+ "schemes":[
+ "https",
+ "http"
+ ],
+ "swagger":"2.0",
+ "info":{
+ "description":"Something",
+ "title":"Something",
+ "contact":{
+ "name":"Somebody",
+ "url":"https://url.com",
+ "email":"email@url.com"
+ },
+ "version":"v1"
+ },
+ "host":"security.sonusnet.com",
+ "basePath":"/api",
+ "paths":{
+ "/whatnot":{
+ "get":{
+ "description":"Get something",
+ "responses":{
+ "200":{
+ "description":"The something",
+ "schema":{
+ "description":"A collection of service events",
+ "type":"object",
+ "properties":{
+ "page":{
+ "description":"A description of a paged result",
+ "type":"object",
+ "properties":{
+ "page":{
+ "description":"the page that was requested",
+ "type":"integer"
+ },
+ "page_items":{
+ "description":"the number of items per page requested",
+ "type":"integer"
+ },
+ "pages":{
+ "description":"the total number of pages available",
+ "type":"integer"
+ },
+ "total_items":{
+ "description":"the total number of items available",
+ "type":"integer",
+ "format":"int64"
+ }
+ }
+ },
+ "something":{
+ "description":"Something",
+ "type":"object",
+ "properties":{
+ "p1":{
+ "description":"A string",
+ "type":"string"
+ },
+ "p2":{
+ "description":"An integer",
+ "type":"integer"
+ }
+ }
+ }
+ }
+ }
+ },
+ "500":{
+ "description":"Oops"
+ }
+ }
+ }
+ }
+ },
+ "definitions":{
+ "Something":{
+ "description":"A collection of service events",
+ "type":"object",
+ "properties":{
+ "page":{
+ "description":"A description of a paged result",
+ "type":"object",
+ "properties":{
+ "page":{
+ "description":"the page that was requested",
+ "type":"integer"
+ },
+ "page_items":{
+ "description":"the number of items per page requested",
+ "type":"integer"
+ },
+ "pages":{
+ "description":"the total number of pages available",
+ "type":"integer"
+ },
+ "total_items":{
+ "description":"the total number of items available",
+ "type":"integer",
+ "format":"int64"
+ }
+ }
+ },
+ "something":{
+ "description":"Something",
+ "type":"object",
+ "properties":{
+ "p1":{
+ "description":"A string",
+ "type":"string"
+ },
+ "p2":{
+ "description":"An integer",
+ "type":"integer"
+ }
+ }
+ }
+ }
+ }
+ }
+}
+`
diff --git a/vendor/github.com/go-openapi/spec/expander.go b/vendor/github.com/go-openapi/spec/expander.go
index b4429a21c..7af80691f 100644
--- a/vendor/github.com/go-openapi/spec/expander.go
+++ b/vendor/github.com/go-openapi/spec/expander.go
@@ -352,14 +352,12 @@ func normalizeFileRef(ref *Ref, relativeBase string) *Ref {
}
func (r *schemaLoader) resolveRef(currentRef, ref *Ref, node, target interface{}) error {
-
tgt := reflect.ValueOf(target)
if tgt.Kind() != reflect.Ptr {
return fmt.Errorf("resolve ref: target needs to be a pointer")
}
oldRef := currentRef
-
if currentRef != nil {
debugLog("resolve ref current %s new %s", currentRef.String(), ref.String())
nextRef := nextRef(node, ref, currentRef.GetPointer())
@@ -467,8 +465,6 @@ func (r *schemaLoader) resolveRef(currentRef, ref *Ref, node, target interface{}
return err
}
- r.currentRef = currentRef
-
return nil
}
@@ -645,14 +641,18 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader) (*
return resolver.root.(*Schema), nil
}
- // t is the new expanded schema
var t *Schema
-
+ var basePath string
+ b, _ := json.Marshal(target)
+ debugLog("Target is: %s", string(b))
for target.Ref.String() != "" {
if swag.ContainsStringsCI(parentRefs, target.Ref.String()) {
return &target, nil
}
-
+ basePath = target.Ref.RemoteURI()
+ debugLog("\n\n\n\n\nbasePath: %s", basePath)
+ b, _ := json.Marshal(target)
+ debugLog("calling Resolve with target: %s", string(b))
if err := resolver.Resolve(&target.Ref, &t); shouldStopOnError(err, resolver.options) {
return &target, err
}
@@ -666,7 +666,13 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader) (*
target = *t
}
}
-
+ if target.Ref.String() == "" {
+ b, _ := json.Marshal(target)
+ debugLog("before: %s", string(b))
+ modifyRefs(&target, basePath)
+ b, _ = json.Marshal(target)
+ debugLog("after: %s", string(b))
+ }
t, err := expandItems(target, parentRefs, resolver)
if shouldStopOnError(err, resolver.options) {
return &target, err
@@ -675,6 +681,8 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader) (*
target = *t
}
+ resolver.reset()
+
for i := range target.AllOf {
t, err := expandSchema(target.AllOf[i], parentRefs, resolver)
if shouldStopOnError(err, resolver.options) {
diff --git a/vendor/github.com/go-openapi/spec/items.go b/vendor/github.com/go-openapi/spec/items.go
index 46944fb69..07ac88e66 100644
--- a/vendor/github.com/go-openapi/spec/items.go
+++ b/vendor/github.com/go-openapi/spec/items.go
@@ -178,9 +178,14 @@ func (i *Items) UnmarshalJSON(data []byte) error {
if err := json.Unmarshal(data, &simpleSchema); err != nil {
return err
}
+ var vendorExtensible VendorExtensible
+ if err := json.Unmarshal(data, &vendorExtensible); err != nil {
+ return err
+ }
i.Refable = ref
i.CommonValidations = validations
i.SimpleSchema = simpleSchema
+ i.VendorExtensible = vendorExtensible
return nil
}
@@ -198,7 +203,11 @@ func (i Items) MarshalJSON() ([]byte, error) {
if err != nil {
return nil, err
}
- return swag.ConcatJSON(b3, b1, b2), nil
+ b4, err := json.Marshal(i.VendorExtensible)
+ if err != nil {
+ return nil, err
+ }
+ return swag.ConcatJSON(b4, b3, b1, b2), nil
}
// JSONLookup look up a value by the json property name
diff --git a/vendor/github.com/go-openapi/spec/refmodifier.go b/vendor/github.com/go-openapi/spec/refmodifier.go
new file mode 100644
index 000000000..8482608ea
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/refmodifier.go
@@ -0,0 +1,82 @@
+// Copyright 2017 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "fmt"
+)
+
+func modifyItemsRefs(target *Schema, basePath string) {
+ if target.Items != nil {
+ if target.Items.Schema != nil {
+ modifyRefs(target.Items.Schema, basePath)
+ }
+ for i := range target.Items.Schemas {
+ s := target.Items.Schemas[i]
+ modifyRefs(&s, basePath)
+ target.Items.Schemas[i] = s
+ }
+ }
+}
+
+func modifyRefs(target *Schema, basePath string) {
+ if target.Ref.String() != "" {
+ if target.Ref.RemoteURI() == basePath {
+ return
+ }
+ newURL := fmt.Sprintf("%s%s", basePath, target.Ref.String())
+ target.Ref, _ = NewRef(newURL)
+ }
+
+ modifyItemsRefs(target, basePath)
+ for i := range target.AllOf {
+ modifyRefs(&target.AllOf[i], basePath)
+ }
+ for i := range target.AnyOf {
+ modifyRefs(&target.AnyOf[i], basePath)
+ }
+ for i := range target.OneOf {
+ modifyRefs(&target.OneOf[i], basePath)
+ }
+ if target.Not != nil {
+ modifyRefs(target.Not, basePath)
+ }
+ for k := range target.Properties {
+ s := target.Properties[k]
+ modifyRefs(&s, basePath)
+ target.Properties[k] = s
+ }
+ if target.AdditionalProperties != nil && target.AdditionalProperties.Schema != nil {
+ modifyRefs(target.AdditionalProperties.Schema, basePath)
+ }
+ for k := range target.PatternProperties {
+ s := target.PatternProperties[k]
+ modifyRefs(&s, basePath)
+ target.PatternProperties[k] = s
+ }
+ for k := range target.Dependencies {
+ if target.Dependencies[k].Schema != nil {
+ modifyRefs(target.Dependencies[k].Schema, basePath)
+ }
+ }
+ if target.AdditionalItems != nil && target.AdditionalItems.Schema != nil {
+ modifyRefs(target.AdditionalItems.Schema, basePath)
+ }
+ for k := range target.Definitions {
+ s := target.Definitions[k]
+ modifyRefs(&s, basePath)
+ target.Definitions[k] = s
+ }
+}
diff --git a/vendor/github.com/go-openapi/spec/refmodifier_test.go b/vendor/github.com/go-openapi/spec/refmodifier_test.go
new file mode 100644
index 000000000..c456cb43f
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/refmodifier_test.go
@@ -0,0 +1,335 @@
+// Copyright 2017 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+var testJsonSchema = `{
+ "id": "http://json-schema.org/draft-04/schema#",
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "description": "Core schema meta-schema",
+ "definitions": {
+ "schemaArray": {
+ "type": "array",
+ "minItems": 1,
+ "items": { "$ref": "#" }
+ },
+ "positiveInteger": {
+ "type": "integer",
+ "minimum": 0
+ },
+ "positiveIntegerDefault0": {
+ "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ]
+ },
+ "simpleTypes": {
+ "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ]
+ },
+ "stringArray": {
+ "type": "array",
+ "items": { "type": "string" },
+ "minItems": 1,
+ "uniqueItems": true
+ }
+ },
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string",
+ "format": "uri"
+ },
+ "$schema": {
+ "type": "string",
+ "format": "uri"
+ },
+ "title": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "default": {},
+ "multipleOf": {
+ "type": "number",
+ "minimum": 0,
+ "exclusiveMinimum": true
+ },
+ "maximum": {
+ "type": "number"
+ },
+ "exclusiveMaximum": {
+ "type": "boolean",
+ "default": false
+ },
+ "minimum": {
+ "type": "number"
+ },
+ "exclusiveMinimum": {
+ "type": "boolean",
+ "default": false
+ },
+ "maxLength": { "$ref": "#/definitions/positiveInteger" },
+ "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" },
+ "pattern": {
+ "type": "string",
+ "format": "regex"
+ },
+ "additionalItems": {
+ "anyOf": [
+ { "type": "boolean" },
+ { "$ref": "#" }
+ ],
+ "default": {}
+ },
+ "items": {
+ "anyOf": [
+ { "$ref": "#" },
+ { "$ref": "#/definitions/schemaArray" }
+ ],
+ "default": {}
+ },
+ "maxItems": { "$ref": "#/definitions/positiveInteger" },
+ "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" },
+ "uniqueItems": {
+ "type": "boolean",
+ "default": false
+ },
+ "maxProperties": { "$ref": "#/definitions/positiveInteger" },
+ "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" },
+ "required": { "$ref": "#/definitions/stringArray" },
+ "additionalProperties": {
+ "anyOf": [
+ { "type": "boolean" },
+ { "$ref": "#" }
+ ],
+ "default": {}
+ },
+ "definitions": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "default": {}
+ },
+ "properties": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "default": {}
+ },
+ "patternProperties": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "default": {}
+ },
+ "dependencies": {
+ "type": "object",
+ "additionalProperties": {
+ "anyOf": [
+ { "$ref": "#" },
+ { "$ref": "#/definitions/stringArray" }
+ ]
+ }
+ },
+ "enum": {
+ "type": "array",
+ "minItems": 1,
+ "uniqueItems": true
+ },
+ "type": {
+ "anyOf": [
+ { "$ref": "#/definitions/simpleTypes" },
+ {
+ "type": "array",
+ "items": { "$ref": "#/definitions/simpleTypes" },
+ "minItems": 1,
+ "uniqueItems": true
+ }
+ ]
+ },
+ "allOf": { "$ref": "#/definitions/schemaArray" },
+ "anyOf": { "$ref": "#/definitions/schemaArray" },
+ "oneOf": { "$ref": "#/definitions/schemaArray" },
+ "not": { "$ref": "#" }
+ },
+ "dependencies": {
+ "exclusiveMaximum": [ "maximum" ],
+ "exclusiveMinimum": [ "minimum" ]
+ },
+ "default": {}
+}
+`
+
+var modifiedTestJsonSchema = `{
+ "id": "http://json-schema.org/draft-04/schema#",
+ "$schema": "http://json-schema.org/draft-04/schema",
+ "description": "Core schema meta-schema",
+ "definitions": {
+ "schemaArray": {
+ "type": "array",
+ "minItems": 1,
+ "items": { "$ref": "" }
+ },
+ "positiveInteger": {
+ "type": "integer",
+ "minimum": 0
+ },
+ "positiveIntegerDefault0": {
+ "allOf": [ { "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" }, { "default": 0 } ]
+ },
+ "simpleTypes": {
+ "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ]
+ },
+ "stringArray": {
+ "type": "array",
+ "items": { "type": "string" },
+ "minItems": 1,
+ "uniqueItems": true
+ }
+ },
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string",
+ "format": "uri"
+ },
+ "$schema": {
+ "type": "string",
+ "format": "uri"
+ },
+ "title": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "default": {},
+ "multipleOf": {
+ "type": "number",
+ "minimum": 0,
+ "exclusiveMinimum": true
+ },
+ "maximum": {
+ "type": "number"
+ },
+ "exclusiveMaximum": {
+ "type": "boolean",
+ "default": false
+ },
+ "minimum": {
+ "type": "number"
+ },
+ "exclusiveMinimum": {
+ "type": "boolean",
+ "default": false
+ },
+ "maxLength": { "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" },
+ "minLength": { "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" },
+ "pattern": {
+ "type": "string",
+ "format": "regex"
+ },
+ "additionalItems": {
+ "anyOf": [
+ { "type": "boolean" },
+ { "$ref": "" }
+ ],
+ "default": {}
+ },
+ "items": {
+ "anyOf": [
+ { "$ref": "" },
+ { "$ref": "http://json-schema.org/draft-04/schema#/definitions/schemaArray" }
+ ],
+ "default": {}
+ },
+ "maxItems": { "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" },
+ "minItems": { "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" },
+ "uniqueItems": {
+ "type": "boolean",
+ "default": false
+ },
+ "maxProperties": { "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" },
+ "minProperties": { "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" },
+ "required": { "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" },
+ "additionalProperties": {
+ "anyOf": [
+ { "type": "boolean" },
+ { "$ref": "" }
+ ],
+ "default": {}
+ },
+ "definitions": {
+ "type": "object",
+ "additionalProperties": { "$ref": "" },
+ "default": {}
+ },
+ "properties": {
+ "type": "object",
+ "additionalProperties": { "$ref": "" },
+ "default": {}
+ },
+ "patternProperties": {
+ "type": "object",
+ "additionalProperties": { "$ref": "" },
+ "default": {}
+ },
+ "dependencies": {
+ "type": "object",
+ "additionalProperties": {
+ "anyOf": [
+ { "$ref": "" },
+ { "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" }
+ ]
+ }
+ },
+ "enum": {
+ "type": "array",
+ "minItems": 1,
+ "uniqueItems": true
+ },
+ "type": {
+ "anyOf": [
+ { "$ref": "http://json-schema.org/draft-04/schema#/definitions/simpleTypes" },
+ {
+ "type": "array",
+ "items": { "$ref": "http://json-schema.org/draft-04/schema#/definitions/simpleTypes" },
+ "minItems": 1,
+ "uniqueItems": true
+ }
+ ]
+ },
+ "allOf": { "$ref": "http://json-schema.org/draft-04/schema#/definitions/schemaArray" },
+ "anyOf": { "$ref": "http://json-schema.org/draft-04/schema#/definitions/schemaArray" },
+ "oneOf": { "$ref": "http://json-schema.org/draft-04/schema#/definitions/schemaArray" },
+ "not": { "$ref": "" }
+ },
+ "dependencies": {
+ "exclusiveMaximum": [ "maximum" ],
+ "exclusiveMinimum": [ "minimum" ]
+ },
+ "default": {}
+}
+`
+
+func TestRefModifier(t *testing.T) {
+ sch := Schema{}
+ assert.NoError(t, json.Unmarshal([]byte(testJsonSchema), &sch))
+ modifyRefs(&sch, "http://json-schema.org/draft-04/schema")
+ b, err := sch.MarshalJSON()
+ assert.NoError(t, err)
+ assert.JSONEq(t, modifiedTestJsonSchema, string(b))
+}
diff --git a/vendor/github.com/go-openapi/spec/swagger.go b/vendor/github.com/go-openapi/spec/swagger.go
index 393a31677..23780c78a 100644
--- a/vendor/github.com/go-openapi/spec/swagger.go
+++ b/vendor/github.com/go-openapi/spec/swagger.go
@@ -156,7 +156,7 @@ func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) {
if s.Schema != nil {
return json.Marshal(s.Schema)
}
- return nil, nil
+ return []byte("null"), nil
}
// UnmarshalJSON converts this schema object or array from a JSON structure
diff --git a/vendor/github.com/gogo/protobuf/.travis.yml b/vendor/github.com/gogo/protobuf/.travis.yml
index 31ebd3150..7f69fba09 100644
--- a/vendor/github.com/gogo/protobuf/.travis.yml
+++ b/vendor/github.com/gogo/protobuf/.travis.yml
@@ -1,7 +1,7 @@
env:
- PROTOBUF_VERSION=2.6.1
- PROTOBUF_VERSION=3.0.2
- - PROTOBUF_VERSION=3.1.0
+ - PROTOBUF_VERSION=3.4.0
before_install:
- ./install-protobuf.sh
@@ -10,15 +10,11 @@ before_install:
script:
- PATH=/home/travis/bin:$PATH make buildserverall
- echo $TRAVIS_GO_VERSION
- - if [ "$TRAVIS_GO_VERSION" == 1.8 ] && [[ "$PROTOBUF_VERSION" == 3.1.0 ]]; then ! git status --porcelain | read || (git status; git diff; exit 1); fi
+ - if [ "$TRAVIS_GO_VERSION" == 1.9 ] && [[ "$PROTOBUF_VERSION" == 3.4.0 ]]; then ! git status --porcelain | read || (git status; git diff; exit 1); fi
language: go
go:
- - 1.6.3
- - 1.7.1
- - 1.8
+ - 1.8.3
+ - 1.9
-matrix:
- allow_failures:
- - go: 1.6.3
diff --git a/vendor/github.com/gogo/protobuf/AUTHORS b/vendor/github.com/gogo/protobuf/AUTHORS
index 2eaf4d53a..3d97fc7a2 100644
--- a/vendor/github.com/gogo/protobuf/AUTHORS
+++ b/vendor/github.com/gogo/protobuf/AUTHORS
@@ -10,5 +10,6 @@
# Please keep the list sorted.
+Sendgrid, Inc
Vastech SA (PTY) LTD
Walter Schulze <awalterschulze@gmail.com>
diff --git a/vendor/github.com/gogo/protobuf/CONTRIBUTORS b/vendor/github.com/gogo/protobuf/CONTRIBUTORS
index 84a85b1e8..b1abc4d30 100644
--- a/vendor/github.com/gogo/protobuf/CONTRIBUTORS
+++ b/vendor/github.com/gogo/protobuf/CONTRIBUTORS
@@ -1,4 +1,5 @@
Anton Povarov <anton.povarov@gmail.com>
+Brian Goff <cpuguy83@gmail.com>
Clayton Coleman <ccoleman@redhat.com>
Denis Smirnov <denis.smirnov.91@gmail.com>
DongYun Kang <ceram1000@gmail.com>
@@ -10,9 +11,12 @@ John Shahid <jvshahid@gmail.com>
John Tuley <john@tuley.org>
Laurent <laurent@adyoulike.com>
Patrick Lee <patrick@dropbox.com>
+Roger Johansson <rogeralsing@gmail.com>
+Sam Nguyen <sam.nguyen@sendgrid.com>
Sergio Arbeo <serabe@gmail.com>
Stephen J Day <stephen.day@docker.com>
Tamir Duberstein <tamird@gmail.com>
Todd Eisenberger <teisenberger@dropbox.com>
Tormod Erevik Lea <tormodlea@gmail.com>
+Vyacheslav Kim <kane@sendgrid.com>
Walter Schulze <awalterschulze@gmail.com>
diff --git a/vendor/github.com/gogo/protobuf/Makefile b/vendor/github.com/gogo/protobuf/Makefile
index 0dcb4ab71..17e329367 100644
--- a/vendor/github.com/gogo/protobuf/Makefile
+++ b/vendor/github.com/gogo/protobuf/Makefile
@@ -83,6 +83,7 @@ regenerate:
make -C test/oneof regenerate
make -C test/oneof3 regenerate
make -C test/theproto3 regenerate
+ make -C test/mapdefaults regenerate
make -C test/mapsproto2 regenerate
make -C test/issue42order regenerate
make -C proto generate-test-pbs
@@ -109,14 +110,19 @@ regenerate:
make -C test/issue260 regenerate
make -C test/issue261 regenerate
make -C test/issue262 regenerate
+ make -C test/issue312 regenerate
make -C test/enumdecl regenerate
make -C test/typedecl_all regenerate
make -C test/enumdecl_all regenerate
+ make -C test/int64support regenerate
+ make -C test/issue322 regenerate
+ make -C test/issue330 regenerate
make gofmt
tests:
go build ./test/enumprefix
go test ./...
+ (cd test/stdtypes && make test)
vet:
go vet ./...
@@ -138,15 +144,16 @@ testall:
make tests
bench:
+ go get golang.org/x/tools/cmd/benchcmp
(cd test/mixbench && go build .)
- (cd test/mixbench && ./mixbench)
+ ./test/mixbench/mixbench
contributors:
git log --format='%aN <%aE>' | sort -fu > CONTRIBUTORS
js:
-ifeq (go1.8, $(findstring go1.8, $(GO_VERSION)))
- go get github.com/gopherjs/gopherjs
+ifeq (go1.9, $(findstring go1.9, $(GO_VERSION)))
+ go get -u github.com/gopherjs/gopherjs
gopherjs build github.com/gogo/protobuf/protoc-gen-gogo
endif
diff --git a/vendor/github.com/gogo/protobuf/README b/vendor/github.com/gogo/protobuf/README
index 0ad513633..c82082723 100644
--- a/vendor/github.com/gogo/protobuf/README
+++ b/vendor/github.com/gogo/protobuf/README
@@ -25,7 +25,7 @@ To use this software, you must:
for details or, if you are using gccgo, follow the instructions at
https://golang.org/doc/install/gccgo
- Grab the code from the repository and install the proto package.
- The simplest way is to run `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}`.
+ The simplest way is to run `go get -u github.com/golang/protobuf/protoc-gen-go`.
The compiler plugin, protoc-gen-go, will be installed in $GOBIN,
defaulting to $GOPATH/bin. It must be in your $PATH for the protocol
compiler, protoc, to find it.
@@ -118,7 +118,6 @@ for a protocol buffer variable v:
When the .proto file specifies `syntax="proto3"`, there are some differences:
- Non-repeated fields of non-message type are values instead of pointers.
- - Getters are only generated for message and oneof fields.
- Enum types do not get an Enum method.
Consider file test.proto, containing
diff --git a/vendor/github.com/gogo/protobuf/Readme.md b/vendor/github.com/gogo/protobuf/Readme.md
index d31c13f83..b8eb4d14e 100644
--- a/vendor/github.com/gogo/protobuf/Readme.md
+++ b/vendor/github.com/gogo/protobuf/Readme.md
@@ -38,14 +38,19 @@ These projects use gogoprotobuf:
- <a href="https://github.com/docker/swarmkit">docker swarmkit</a> - <a href="https://github.com/docker/swarmkit/blob/63600e01af3b8da2a0ed1c9fa6e1ae4299d75edb/api/objects.proto">sample proto file</a>
- <a href="https://nats.io/">nats.io</a> - <a href="https://github.com/nats-io/go-nats-streaming/blob/master/pb/protocol.proto">go-nats-streaming</a>
- <a href="https://github.com/pingcap/tidb">tidb</a> - Communication between <a href="https://github.com/pingcap/tipb/blob/master/generate-go.sh#L4">tidb</a> and <a href="https://github.com/pingcap/kvproto/blob/master/generate_go.sh#L3">tikv</a>
- - <a href="https://github.com/AsynkronIT/protoactor-go">protoactor-go</a> - <a href="https://github.com/AsynkronIT/protoactor-go/blob/dev/protobuf/protoc-gen-protoactor/main.go">vanity command</a> that also generates actors from service definitions
-
-Please lets us know if you are using gogoprotobuf by posting on our <a href="https://groups.google.com/forum/#!topic/gogoprotobuf/Brw76BxmFpQ">GoogleGroup</a>.
+ - <a href="https://github.com/AsynkronIT/protoactor-go">protoactor-go</a> - <a href="https://github.com/AsynkronIT/protoactor-go/blob/master/protobuf/protoc-gen-protoactor/main.go">vanity command</a> that also generates actors from service definitions
+ - <a href="https://containerd.io/">containerd</a> - <a href="https://github.com/containerd/containerd/tree/master/cmd/protoc-gen-gogoctrd">vanity command with custom field names</a> that conforms to the golang convention.
+ - <a href="https://github.com/heroiclabs/nakama">nakama</a>
+ - <a href="https://github.com/src-d/proteus">proteus</a>
+ - <a href="https://github.com/go-graphite">carbonzipper stack</a>
+ - <a href="https://sendgrid.com/">SendGrid</a>
+
+Please let us know if you are using gogoprotobuf by posting on our <a href="https://groups.google.com/forum/#!topic/gogoprotobuf/Brw76BxmFpQ">GoogleGroup</a>.
### Mentioned
- <a href="http://www.slideshare.net/albertstrasheim/serialization-in-go">Cloudflare - go serialization talk - Albert Strasheim</a>
- - <a href="http://gophercon.sourcegraph.com/post/83747547505/writing-a-high-performance-database-in-go">gophercon</a>
+ - <a href="https://youtu.be/4xB46Xl9O9Q?t=557">GopherCon 2014 Writing High Performance Databases in Go by Ben Johnson</a>
- <a href="https://github.com/alecthomas/go_serialization_benchmarks">alecthomas' go serialization benchmarks</a>
## Getting Started
@@ -59,10 +64,10 @@ After that you can choose:
### Installation
-To install it, you must first have Go (at least version 1.6.3) installed (see [http://golang.org/doc/install](http://golang.org/doc/install)). Go 1.7.1 and 1.8 is continuously tested.
+To install it, you must first have Go (at least version 1.6.3) installed (see [http://golang.org/doc/install](http://golang.org/doc/install)). Go 1.8.3 and 1.9 are continuously tested.
Next, install the standard protocol buffer implementation from [https://github.com/google/protobuf](https://github.com/google/protobuf).
-Most versions from 2.3.1 should not give any problems, but 2.6.1, 3.0.2 and 3.1.0 are continuously tested.
+Most versions from 2.3.1 should not give any problems, but 2.6.1, 3.0.2 and 3.4.0 are continuously tested.
### Speed
diff --git a/vendor/github.com/gogo/protobuf/extensions.md b/vendor/github.com/gogo/protobuf/extensions.md
index 891359ac7..fcfe17af2 100644
--- a/vendor/github.com/gogo/protobuf/extensions.md
+++ b/vendor/github.com/gogo/protobuf/extensions.md
@@ -20,8 +20,8 @@ See [BenchComparison](https://github.com/gogo/protobuf/blob/master/bench.md) for
<tr><td><a href="http://godoc.org/github.com/gogo/protobuf/plugin/size">sizer</a></td><td>Message</td><td>bool</td><td>if true, a Size method is generated for the specific message</td><td>false</td></tr>
<tr><td><a href="http://godoc.org/github.com/gogo/protobuf/plugin/unmarshal">unmarshaler</a></td><td> Message </td><td> bool </td><td> if true, an Unmarshal method is generated for the specific message </td><td> false</td></tr>
<tr><td><a href="http://godoc.org/github.com/gogo/protobuf/plugin/size">protosizer</a></td><td>Message</td><td>bool</td><td>if true, a ProtoSize method is generated for the specific message</td><td>false</td></tr>
-<tr><td><a href="http://godoc.org/github.com/gogo/protobuf/plugin/marshalto"> unsafe_marshaler</a> </td><td> Message </td><td> bool </td><td> if true, a Marshal and MarshalTo method is generated for the specific message. The generated code uses the unsafe package and is not compatible with big endian CPUs. </td><td> false</td></tr>
-<tr><td><a href="http://godoc.org/github.com/gogo/protobuf/plugin/unmarshal">unsafe_unmarshaler</a></td><td> Message </td><td> bool </td><td> if true, an Unmarshal method is generated for the specific message. The generated code uses the unsafe package and is not compatible with big endian CPUs. </td><td> false</td></tr>
+<tr><td><a href="http://godoc.org/github.com/gogo/protobuf/plugin/marshalto"> unsafe_marshaler</a> (deprecated) </td><td> Message </td><td> bool </td><td> if true, a Marshal and MarshalTo method is generated. </td><td> false</td></tr>
+<tr><td><a href="http://godoc.org/github.com/gogo/protobuf/plugin/unmarshal">unsafe_unmarshaler</a> (deprecated) </td><td> Message </td><td> bool </td><td> if true, an Unmarshal method is generated. </td><td> false</td></tr>
<tr><td><a href="http://godoc.org/github.com/gogo/protobuf/plugin/marshalto">stable_marshaler</a></td><td> Message </td><td> bool </td><td> if true, a Marshal and MarshalTo method is generated for the specific message, but unlike marshaler the output is guaranteed to be deterministic, at the sacrifice of some speed</td><td> false </td></tr>
<tr><td>typedecl (beta)</td><td> Message </td><td> bool </td><td> if false, type declaration of the message is excluded from the generated output. Requires the marshaler and unmarshaler to be generated.</td><td> true </td></tr>
</table>
@@ -69,6 +69,7 @@ The enumprefix, getters and stringer extensions can be used to remove some of th
<tr><td> goproto_enum_stringer (experimental) </td><td> Enum </td><td> bool </td><td> if false, the enum is generated without the default string method, this is useful for rather using enum_stringer </td><td> true </td></tr>
<tr><td> goproto_extensions_map (beta) </td><td> Message </td><td> bool </td><td> if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension </td><td> true </td></tr>
<tr><td> goproto_unrecognized (beta) </td><td> Message </td><td> bool </td><td>if false, XXX_unrecognized field is not generated. This is useful to reduce GC pressure at the cost of losing information about unrecognized fields. </td><td> true </td></tr>
+<tr><td> goproto_registration (beta) </td><td> File </td><td> bool </td><td>if true, the generated files will register all messages and types against both gogo/protobuf and golang/protobuf. This is necessary when using third-party packages which read registrations from golang/protobuf (such as the grpc-gateway). </td><td> false </td></tr>
</table>
# Less Typing
diff --git a/vendor/github.com/gogo/protobuf/install-protobuf.sh b/vendor/github.com/gogo/protobuf/install-protobuf.sh
index 10c9320ea..fc40642e4 100755
--- a/vendor/github.com/gogo/protobuf/install-protobuf.sh
+++ b/vendor/github.com/gogo/protobuf/install-protobuf.sh
@@ -7,23 +7,26 @@ die() {
exit 1
}
+cd /home/travis
+
case "$PROTOBUF_VERSION" in
2*)
basename=protobuf-$PROTOBUF_VERSION
+ wget https://github.com/google/protobuf/releases/download/v$PROTOBUF_VERSION/$basename.tar.gz
+ tar xzf $basename.tar.gz
+ cd protobuf-$PROTOBUF_VERSION
+ ./configure --prefix=/home/travis && make -j2 && make install
;;
3*)
- basename=protobuf-cpp-$PROTOBUF_VERSION
+ basename=protoc-$PROTOBUF_VERSION-linux-x86_64
+ wget https://github.com/google/protobuf/releases/download/v$PROTOBUF_VERSION/$basename.zip
+ unzip $basename.zip
;;
*)
die "unknown protobuf version: $PROTOBUF_VERSION"
;;
esac
-cd /home/travis
-wget https://github.com/google/protobuf/releases/download/v$PROTOBUF_VERSION/$basename.tar.gz
-tar xzf $basename.tar.gz
-cd protobuf-$PROTOBUF_VERSION
-./configure --prefix=/home/travis && make -j2 && make install
diff --git a/vendor/github.com/gogo/protobuf/proto/decode_test.go b/vendor/github.com/gogo/protobuf/proto/decode_test.go
index 0cfae71ec..64d4decd9 100644
--- a/vendor/github.com/gogo/protobuf/proto/decode_test.go
+++ b/vendor/github.com/gogo/protobuf/proto/decode_test.go
@@ -29,6 +29,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// +build go1.7
+
package proto_test
import (
diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go
index 2b30f8462..8b84d1b22 100644
--- a/vendor/github.com/gogo/protobuf/proto/encode.go
+++ b/vendor/github.com/gogo/protobuf/proto/encode.go
@@ -174,11 +174,11 @@ func sizeFixed32(x uint64) int {
// This is the format used for the sint64 protocol buffer type.
func (p *Buffer) EncodeZigzag64(x uint64) error {
// use signed number to get arithmetic right shift.
- return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+ return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63)))
}
func sizeZigzag64(x uint64) int {
- return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+ return sizeVarint((x << 1) ^ uint64((int64(x) >> 63)))
}
// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
diff --git a/vendor/github.com/gogo/protobuf/proto/encode_test.go b/vendor/github.com/gogo/protobuf/proto/encode_test.go
index bc7e18ab5..2176b894d 100644
--- a/vendor/github.com/gogo/protobuf/proto/encode_test.go
+++ b/vendor/github.com/gogo/protobuf/proto/encode_test.go
@@ -29,6 +29,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// +build go1.7
+
package proto_test
import (
diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go
index 7580bb45c..c98d73da4 100644
--- a/vendor/github.com/gogo/protobuf/proto/lib.go
+++ b/vendor/github.com/gogo/protobuf/proto/lib.go
@@ -73,7 +73,6 @@ for a protocol buffer variable v:
When the .proto file specifies `syntax="proto3"`, there are some differences:
- Non-repeated fields of non-message type are values instead of pointers.
- - Getters are only generated for message and oneof fields.
- Enum types do not get an Enum method.
The simplest way to describe this is to see an example.
diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go
index 44b332052..2a69e8862 100644
--- a/vendor/github.com/gogo/protobuf/proto/properties.go
+++ b/vendor/github.com/gogo/protobuf/proto/properties.go
@@ -193,6 +193,7 @@ type Properties struct {
Default string // default value
HasDefault bool // whether an explicit default was provided
CustomType string
+ CastType string
StdTime bool
StdDuration bool
@@ -341,6 +342,8 @@ func (p *Properties) Parse(s string) {
p.OrigName = strings.Split(f, "=")[1]
case strings.HasPrefix(f, "customtype="):
p.CustomType = strings.Split(f, "=")[1]
+ case strings.HasPrefix(f, "casttype="):
+ p.CastType = strings.Split(f, "=")[1]
case f == "stdtime":
p.StdTime = true
case f == "stdduration":
diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go
index d63732fcb..f609d1d45 100644
--- a/vendor/github.com/gogo/protobuf/proto/text.go
+++ b/vendor/github.com/gogo/protobuf/proto/text.go
@@ -522,6 +522,17 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert
}
return nil
}
+ } else if len(props.CastType) > 0 {
+ if _, ok := v.Interface().(interface {
+ String() string
+ }); ok {
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ _, err := fmt.Fprintf(w, "%d", v.Interface())
+ return err
+ }
+ }
} else if props.StdTime {
t, ok := v.Interface().(time.Time)
if !ok {
@@ -531,9 +542,9 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert
if err != nil {
return err
}
- props.StdTime = false
- err = tm.writeAny(w, reflect.ValueOf(tproto), props)
- props.StdTime = true
+ propsCopy := *props // Make a copy so that this is goroutine-safe
+ propsCopy.StdTime = false
+ err = tm.writeAny(w, reflect.ValueOf(tproto), &propsCopy)
return err
} else if props.StdDuration {
d, ok := v.Interface().(time.Duration)
@@ -541,9 +552,9 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert
return fmt.Errorf("stdtime is not time.Duration, but %T", v.Interface())
}
dproto := durationProto(d)
- props.StdDuration = false
- err := tm.writeAny(w, reflect.ValueOf(dproto), props)
- props.StdDuration = true
+ propsCopy := *props // Make a copy so that this is goroutine-safe
+ propsCopy.StdDuration = false
+ err := tm.writeAny(w, reflect.ValueOf(dproto), &propsCopy)
return err
}
}
diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go
index 9db12e960..f1276729a 100644
--- a/vendor/github.com/gogo/protobuf/proto/text_parser.go
+++ b/vendor/github.com/gogo/protobuf/proto/text_parser.go
@@ -983,7 +983,7 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error {
return p.readStruct(fv, terminator)
case reflect.Uint32:
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
- fv.SetUint(uint64(x))
+ fv.SetUint(x)
return nil
}
case reflect.Uint64:
diff --git a/vendor/github.com/gogo/protobuf/proto/text_test.go b/vendor/github.com/gogo/protobuf/proto/text_test.go
index 652404842..27df6cb9b 100644
--- a/vendor/github.com/gogo/protobuf/proto/text_test.go
+++ b/vendor/github.com/gogo/protobuf/proto/text_test.go
@@ -339,13 +339,13 @@ func TestStringEscaping(t *testing.T) {
}
// Check round-trip.
- pb := new(pb.Strings)
- if err := proto.UnmarshalText(s, pb); err != nil {
+ pbStrings := new(pb.Strings)
+ if err := proto.UnmarshalText(s, pbStrings); err != nil {
t.Errorf("#%d: UnmarshalText: %v", i, err)
continue
}
- if !proto.Equal(pb, tc.in) {
- t.Errorf("#%d: Round-trip failed:\nstart: %v\n end: %v", i, tc.in, pb)
+ if !proto.Equal(pbStrings, tc.in) {
+ t.Errorf("#%d: Round-trip failed:\nstart: %v\n end: %v", i, tc.in, pbStrings)
}
}
}
diff --git a/vendor/github.com/golang/protobuf/.travis.yml b/vendor/github.com/golang/protobuf/.travis.yml
index 24e22f85a..93c67805b 100644
--- a/vendor/github.com/golang/protobuf/.travis.yml
+++ b/vendor/github.com/golang/protobuf/.travis.yml
@@ -4,6 +4,7 @@ go:
- 1.6.x
- 1.7.x
- 1.8.x
+- 1.9.x
install:
- go get -v -d -t github.com/golang/protobuf/...
diff --git a/vendor/github.com/golang/protobuf/README.md b/vendor/github.com/golang/protobuf/README.md
index 795f53f6f..9c4c815c0 100644
--- a/vendor/github.com/golang/protobuf/README.md
+++ b/vendor/github.com/golang/protobuf/README.md
@@ -1,6 +1,7 @@
# Go support for Protocol Buffers
[![Build Status](https://travis-ci.org/golang/protobuf.svg?branch=master)](https://travis-ci.org/golang/protobuf)
+[![GoDoc](https://godoc.org/github.com/golang/protobuf?status.svg)](https://godoc.org/github.com/golang/protobuf)
Google's data interchange format.
Copyright 2010 The Go Authors.
@@ -111,6 +112,7 @@ When the .proto file specifies `syntax="proto3"`, there are some differences:
Consider file test.proto, containing
```proto
+ syntax = "proto2";
package example;
enum FOO { X = 17; };
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
index 6c9a6cf74..f34601723 100644
--- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
+++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
@@ -62,6 +62,16 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// any.Unpack(foo)
// ...
//
+// Example 4: Pack and unpack a message in Go
+//
+// foo := &pb.Foo{...}
+// any, err := ptypes.MarshalAny(foo)
+// ...
+// foo := &pb.Foo{}
+// if err := ptypes.UnmarshalAny(any, foo); err != nil {
+// ...
+// }
+//
// The pack methods provided by protobuf library will by default use
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
// methods only use the fully qualified type name after the last '/'
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto
index 9bd3f50a4..c74866762 100644
--- a/vendor/github.com/golang/protobuf/ptypes/any/any.proto
+++ b/vendor/github.com/golang/protobuf/ptypes/any/any.proto
@@ -74,6 +74,16 @@ option objc_class_prefix = "GPB";
// any.Unpack(foo)
// ...
//
+// Example 4: Pack and unpack a message in Go
+//
+// foo := &pb.Foo{...}
+// any, err := ptypes.MarshalAny(foo)
+// ...
+// foo := &pb.Foo{}
+// if err := ptypes.UnmarshalAny(any, foo); err != nil {
+// ...
+// }
+//
// The pack methods provided by protobuf library will by default use
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
// methods only use the fully qualified type name after the last '/'
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
new file mode 100644
index 000000000..b2410a098
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
@@ -0,0 +1,144 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/duration.proto
+
+/*
+Package duration is a generated protocol buffer package.
+
+It is generated from these files:
+ google/protobuf/duration.proto
+
+It has these top-level messages:
+ Duration
+*/
+package duration
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// A Duration represents a signed, fixed-length span of time represented
+// as a count of seconds and fractions of seconds at nanosecond
+// resolution. It is independent of any calendar and concepts like "day"
+// or "month". It is related to Timestamp in that the difference between
+// two Timestamp values is a Duration and it can be added or subtracted
+// from a Timestamp. Range is approximately +-10,000 years.
+//
+// # Examples
+//
+// Example 1: Compute Duration from two Timestamps in pseudo code.
+//
+// Timestamp start = ...;
+// Timestamp end = ...;
+// Duration duration = ...;
+//
+// duration.seconds = end.seconds - start.seconds;
+// duration.nanos = end.nanos - start.nanos;
+//
+// if (duration.seconds < 0 && duration.nanos > 0) {
+// duration.seconds += 1;
+// duration.nanos -= 1000000000;
+// } else if (durations.seconds > 0 && duration.nanos < 0) {
+// duration.seconds -= 1;
+// duration.nanos += 1000000000;
+// }
+//
+// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
+//
+// Timestamp start = ...;
+// Duration duration = ...;
+// Timestamp end = ...;
+//
+// end.seconds = start.seconds + duration.seconds;
+// end.nanos = start.nanos + duration.nanos;
+//
+// if (end.nanos < 0) {
+// end.seconds -= 1;
+// end.nanos += 1000000000;
+// } else if (end.nanos >= 1000000000) {
+// end.seconds += 1;
+// end.nanos -= 1000000000;
+// }
+//
+// Example 3: Compute Duration from datetime.timedelta in Python.
+//
+// td = datetime.timedelta(days=3, minutes=10)
+// duration = Duration()
+// duration.FromTimedelta(td)
+//
+// # JSON Mapping
+//
+// In JSON format, the Duration type is encoded as a string rather than an
+// object, where the string ends in the suffix "s" (indicating seconds) and
+// is preceded by the number of seconds, with nanoseconds expressed as
+// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
+// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
+// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
+// microsecond should be expressed in JSON format as "3.000001s".
+//
+//
+type Duration struct {
+ // Signed seconds of the span of time. Must be from -315,576,000,000
+ // to +315,576,000,000 inclusive. Note: these bounds are computed from:
+ // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
+ Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
+ // Signed fractions of a second at nanosecond resolution of the span
+ // of time. Durations less than one second are represented with a 0
+ // `seconds` field and a positive or negative `nanos` field. For durations
+ // of one second or more, a non-zero value for the `nanos` field must be
+ // of the same sign as the `seconds` field. Must be from -999,999,999
+ // to +999,999,999 inclusive.
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
+}
+
+func (m *Duration) Reset() { *m = Duration{} }
+func (m *Duration) String() string { return proto.CompactTextString(m) }
+func (*Duration) ProtoMessage() {}
+func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+func (*Duration) XXX_WellKnownType() string { return "Duration" }
+
+func (m *Duration) GetSeconds() int64 {
+ if m != nil {
+ return m.Seconds
+ }
+ return 0
+}
+
+func (m *Duration) GetNanos() int32 {
+ if m != nil {
+ return m.Nanos
+ }
+ return 0
+}
+
+func init() {
+ proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
+}
+
+func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+ // 190 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
+ 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a,
+ 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56,
+ 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5,
+ 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e,
+ 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c,
+ 0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56,
+ 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e,
+ 0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4,
+ 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78,
+ 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63,
+ 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
new file mode 100644
index 000000000..975fce41a
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
@@ -0,0 +1,117 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/duration";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "DurationProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// A Duration represents a signed, fixed-length span of time represented
+// as a count of seconds and fractions of seconds at nanosecond
+// resolution. It is independent of any calendar and concepts like "day"
+// or "month". It is related to Timestamp in that the difference between
+// two Timestamp values is a Duration and it can be added or subtracted
+// from a Timestamp. Range is approximately +-10,000 years.
+//
+// # Examples
+//
+// Example 1: Compute Duration from two Timestamps in pseudo code.
+//
+// Timestamp start = ...;
+// Timestamp end = ...;
+// Duration duration = ...;
+//
+// duration.seconds = end.seconds - start.seconds;
+// duration.nanos = end.nanos - start.nanos;
+//
+// if (duration.seconds < 0 && duration.nanos > 0) {
+// duration.seconds += 1;
+// duration.nanos -= 1000000000;
+// } else if (durations.seconds > 0 && duration.nanos < 0) {
+// duration.seconds -= 1;
+// duration.nanos += 1000000000;
+// }
+//
+// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
+//
+// Timestamp start = ...;
+// Duration duration = ...;
+// Timestamp end = ...;
+//
+// end.seconds = start.seconds + duration.seconds;
+// end.nanos = start.nanos + duration.nanos;
+//
+// if (end.nanos < 0) {
+// end.seconds -= 1;
+// end.nanos += 1000000000;
+// } else if (end.nanos >= 1000000000) {
+// end.seconds += 1;
+// end.nanos -= 1000000000;
+// }
+//
+// Example 3: Compute Duration from datetime.timedelta in Python.
+//
+// td = datetime.timedelta(days=3, minutes=10)
+// duration = Duration()
+// duration.FromTimedelta(td)
+//
+// # JSON Mapping
+//
+// In JSON format, the Duration type is encoded as a string rather than an
+// object, where the string ends in the suffix "s" (indicating seconds) and
+// is preceded by the number of seconds, with nanoseconds expressed as
+// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
+// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
+// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
+// microsecond should be expressed in JSON format as "3.000001s".
+//
+//
+message Duration {
+
+ // Signed seconds of the span of time. Must be from -315,576,000,000
+ // to +315,576,000,000 inclusive. Note: these bounds are computed from:
+ // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
+ int64 seconds = 1;
+
+ // Signed fractions of a second at nanosecond resolution of the span
+ // of time. Durations less than one second are represented with a 0
+ // `seconds` field and a positive or negative `nanos` field. For durations
+ // of one second or more, a non-zero value for the `nanos` field must be
+ // of the same sign as the `seconds` field. Must be from -999,999,999
+ // to +999,999,999 inclusive.
+ int32 nanos = 2;
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
new file mode 100644
index 000000000..e23e4a25d
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
@@ -0,0 +1,160 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/timestamp.proto
+
+/*
+Package timestamp is a generated protocol buffer package.
+
+It is generated from these files:
+ google/protobuf/timestamp.proto
+
+It has these top-level messages:
+ Timestamp
+*/
+package timestamp
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// A Timestamp represents a point in time independent of any time zone
+// or calendar, represented as seconds and fractions of seconds at
+// nanosecond resolution in UTC Epoch time. It is encoded using the
+// Proleptic Gregorian Calendar which extends the Gregorian calendar
+// backwards to year one. It is encoded assuming all minutes are 60
+// seconds long, i.e. leap seconds are "smeared" so that no leap second
+// table is needed for interpretation. Range is from
+// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
+// By restricting to that range, we ensure that we can convert to
+// and from RFC 3339 date strings.
+// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
+//
+// # Examples
+//
+// Example 1: Compute Timestamp from POSIX `time()`.
+//
+// Timestamp timestamp;
+// timestamp.set_seconds(time(NULL));
+// timestamp.set_nanos(0);
+//
+// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
+//
+// struct timeval tv;
+// gettimeofday(&tv, NULL);
+//
+// Timestamp timestamp;
+// timestamp.set_seconds(tv.tv_sec);
+// timestamp.set_nanos(tv.tv_usec * 1000);
+//
+// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
+//
+// FILETIME ft;
+// GetSystemTimeAsFileTime(&ft);
+// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
+//
+// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
+// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
+// Timestamp timestamp;
+// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
+// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
+//
+// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
+//
+// long millis = System.currentTimeMillis();
+//
+// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
+// .setNanos((int) ((millis % 1000) * 1000000)).build();
+//
+//
+// Example 5: Compute Timestamp from current time in Python.
+//
+// timestamp = Timestamp()
+// timestamp.GetCurrentTime()
+//
+// # JSON Mapping
+//
+// In JSON format, the Timestamp type is encoded as a string in the
+// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
+// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
+// where {year} is always expressed using four digits while {month}, {day},
+// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
+// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
+// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
+// is required, though only UTC (as indicated by "Z") is presently supported.
+//
+// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
+// 01:30 UTC on January 15, 2017.
+//
+// In JavaScript, one can convert a Date object to this format using the
+// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString]
+// method. In Python, a standard `datetime.datetime` object can be converted
+// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
+// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
+// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
+// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime())
+// to obtain a formatter capable of generating timestamps in this format.
+//
+//
+type Timestamp struct {
+ // Represents seconds of UTC time since Unix epoch
+ // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+ // 9999-12-31T23:59:59Z inclusive.
+ Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
+ // Non-negative fractions of a second at nanosecond resolution. Negative
+ // second values with fractions must still have non-negative nanos values
+ // that count forward in time. Must be from 0 to 999,999,999
+ // inclusive.
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
+}
+
+func (m *Timestamp) Reset() { *m = Timestamp{} }
+func (m *Timestamp) String() string { return proto.CompactTextString(m) }
+func (*Timestamp) ProtoMessage() {}
+func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
+
+func (m *Timestamp) GetSeconds() int64 {
+ if m != nil {
+ return m.Seconds
+ }
+ return 0
+}
+
+func (m *Timestamp) GetNanos() int32 {
+ if m != nil {
+ return m.Nanos
+ }
+ return 0
+}
+
+func init() {
+ proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
+}
+
+func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+ // 191 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f,
+ 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d,
+ 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28,
+ 0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5,
+ 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89,
+ 0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70,
+ 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51,
+ 0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89,
+ 0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71,
+ 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a,
+ 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43,
+ 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
new file mode 100644
index 000000000..b7cbd1750
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
@@ -0,0 +1,133 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/timestamp";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "TimestampProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// A Timestamp represents a point in time independent of any time zone
+// or calendar, represented as seconds and fractions of seconds at
+// nanosecond resolution in UTC Epoch time. It is encoded using the
+// Proleptic Gregorian Calendar which extends the Gregorian calendar
+// backwards to year one. It is encoded assuming all minutes are 60
+// seconds long, i.e. leap seconds are "smeared" so that no leap second
+// table is needed for interpretation. Range is from
+// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
+// By restricting to that range, we ensure that we can convert to
+// and from RFC 3339 date strings.
+// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
+//
+// # Examples
+//
+// Example 1: Compute Timestamp from POSIX `time()`.
+//
+// Timestamp timestamp;
+// timestamp.set_seconds(time(NULL));
+// timestamp.set_nanos(0);
+//
+// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
+//
+// struct timeval tv;
+// gettimeofday(&tv, NULL);
+//
+// Timestamp timestamp;
+// timestamp.set_seconds(tv.tv_sec);
+// timestamp.set_nanos(tv.tv_usec * 1000);
+//
+// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
+//
+// FILETIME ft;
+// GetSystemTimeAsFileTime(&ft);
+// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
+//
+// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
+// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
+// Timestamp timestamp;
+// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
+// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
+//
+// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
+//
+// long millis = System.currentTimeMillis();
+//
+// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
+// .setNanos((int) ((millis % 1000) * 1000000)).build();
+//
+//
+// Example 5: Compute Timestamp from current time in Python.
+//
+// timestamp = Timestamp()
+// timestamp.GetCurrentTime()
+//
+// # JSON Mapping
+//
+// In JSON format, the Timestamp type is encoded as a string in the
+// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
+// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
+// where {year} is always expressed using four digits while {month}, {day},
+// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
+// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
+// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
+// is required, though only UTC (as indicated by "Z") is presently supported.
+//
+// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
+// 01:30 UTC on January 15, 2017.
+//
+// In JavaScript, one can convert a Date object to this format using the
+// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString]
+// method. In Python, a standard `datetime.datetime` object can be converted
+// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
+// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
+// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
+// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime())
+// to obtain a formatter capable of generating timestamps in this format.
+//
+//
+message Timestamp {
+
+ // Represents seconds of UTC time since Unix epoch
+ // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+ // 9999-12-31T23:59:59Z inclusive.
+ int64 seconds = 1;
+
+ // Non-negative fractions of a second at nanosecond resolution. Negative
+ // second values with fractions must still have non-negative nanos values
+ // that count forward in time. Must be from 0 to 999,999,999
+ // inclusive.
+ int32 nanos = 2;
+}
diff --git a/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md b/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..469b44907
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md
@@ -0,0 +1,46 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at i@dario.im. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md
index f4769cdd7..b13106979 100644
--- a/vendor/github.com/imdario/mergo/README.md
+++ b/vendor/github.com/imdario/mergo/README.md
@@ -30,7 +30,7 @@ If you were using Mergo **before** April 6th 2015, please check your project wor
### Mergo in the wild
- [docker/docker](https://github.com/docker/docker/)
-- [GoogleCloudPlatform/kubernetes](https://github.com/GoogleCloudPlatform/kubernetes)
+- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
- [imdario/zas](https://github.com/imdario/zas)
- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy)
- [EagerIO/Stout](https://github.com/EagerIO/Stout)
@@ -50,6 +50,7 @@ If you were using Mergo **before** April 6th 2015, please check your project wor
- [thoas/picfit](https://github.com/thoas/picfit)
- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server)
- [jnuthong/item_search](https://github.com/jnuthong/item_search)
+- [Iris Web Framework](https://github.com/kataras/iris)
## Installation
@@ -64,15 +65,27 @@ If you were using Mergo **before** April 6th 2015, please check your project wor
You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. Also maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection).
- if err := mergo.Merge(&dst, src); err != nil {
- // ...
- }
+```go
+if err := mergo.Merge(&dst, src); err != nil {
+ // ...
+}
+```
+
+Also, you can merge overwriting values using MergeWithOverwrite.
+
+```go
+if err := mergo.MergeWithOverwrite(&dst, src); err != nil {
+ // ...
+}
+```
Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field.
- if err := mergo.Map(&dst, srcMap); err != nil {
- // ...
- }
+```go
+if err := mergo.Map(&dst, srcMap); err != nil {
+ // ...
+}
+```
Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values.
@@ -96,11 +109,11 @@ type Foo struct {
func main() {
src := Foo{
A: "one",
+ B: 2,
}
dest := Foo{
A: "two",
- B: 2,
}
mergo.Merge(&dest, src)
diff --git a/vendor/github.com/imdario/mergo/issue23_test.go b/vendor/github.com/imdario/mergo/issue23_test.go
new file mode 100644
index 000000000..9c3258413
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/issue23_test.go
@@ -0,0 +1,27 @@
+package mergo
+
+import (
+ "testing"
+ "time"
+)
+
+type document struct {
+ Created *time.Time
+}
+
+func TestIssue23MergeWithOverwrite(t *testing.T) {
+ now := time.Now()
+ dst := document{
+ &now,
+ }
+ expected := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
+ src := document{
+ &expected,
+ }
+ if err := MergeWithOverwrite(&dst, src); err != nil {
+ t.Errorf("Error while merging %s", err)
+ }
+ if dst.Created != src.Created {
+ t.Fatalf("Created not merged in properly: dst.Created(%v) != src.Created(%v)", dst.Created, src.Created)
+ }
+}
diff --git a/vendor/github.com/imdario/mergo/issue38_test.go b/vendor/github.com/imdario/mergo/issue38_test.go
new file mode 100644
index 000000000..286b68cb1
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/issue38_test.go
@@ -0,0 +1,59 @@
+package mergo
+
+import (
+ "testing"
+ "time"
+)
+
+type structWithoutTimePointer struct {
+ Created time.Time
+}
+
+func TestIssue38Merge(t *testing.T) {
+ dst := structWithoutTimePointer{
+ time.Now(),
+ }
+
+ expected := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
+ src := structWithoutTimePointer{
+ expected,
+ }
+ if err := Merge(&dst, src); err != nil {
+ t.Errorf("Error while merging %s", err)
+ }
+ if dst.Created == src.Created {
+ t.Fatalf("Created merged unexpectedly: dst.Created(%v) == src.Created(%v)", dst.Created, src.Created)
+ }
+}
+
+func TestIssue38MergeEmptyStruct(t *testing.T) {
+ dst := structWithoutTimePointer{}
+
+ expected := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
+ src := structWithoutTimePointer{
+ expected,
+ }
+ if err := Merge(&dst, src); err != nil {
+ t.Errorf("Error while merging %s", err)
+ }
+ if dst.Created == src.Created {
+ t.Fatalf("Created merged unexpectedly: dst.Created(%v) == src.Created(%v)", dst.Created, src.Created)
+ }
+}
+
+func TestIssue38MergeWithOverwrite(t *testing.T) {
+ dst := structWithoutTimePointer{
+ time.Now(),
+ }
+
+ expected := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
+ src := structWithoutTimePointer{
+ expected,
+ }
+ if err := MergeWithOverwrite(&dst, src); err != nil {
+ t.Errorf("Error while merging %s", err)
+ }
+ if dst.Created != src.Created {
+ t.Fatalf("Created not merged in properly: dst.Created(%v) != src.Created(%v)", dst.Created, src.Created)
+ }
+}
diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go
index 8e8c4ba8e..99002565f 100644
--- a/vendor/github.com/imdario/mergo/map.go
+++ b/vendor/github.com/imdario/mergo/map.go
@@ -61,6 +61,13 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, over
dstMap[fieldName] = src.Field(i).Interface()
}
}
+ case reflect.Ptr:
+ if dst.IsNil() {
+ v := reflect.New(dst.Type().Elem())
+ dst.Set(v)
+ }
+ dst = dst.Elem()
+ fallthrough
case reflect.Struct:
srcMap := src.Interface().(map[string]interface{})
for key := range srcMap {
@@ -85,6 +92,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, over
srcKind = reflect.Ptr
}
}
+
if !srcElement.IsValid() {
continue
}
@@ -92,14 +100,16 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, over
if err = deepMerge(dstElement, srcElement, visited, depth+1, overwrite); err != nil {
return
}
- } else {
- if srcKind == reflect.Map {
- if err = deepMap(dstElement, srcElement, visited, depth+1, overwrite); err != nil {
- return
- }
- } else {
- return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind)
+ } else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface {
+ if err = deepMerge(dstElement, srcElement, visited, depth+1, overwrite); err != nil {
+ return
+ }
+ } else if srcKind == reflect.Map {
+ if err = deepMap(dstElement, srcElement, visited, depth+1, overwrite); err != nil {
+ return
}
+ } else {
+ return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind)
}
}
}
diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go
index 11e55b1e2..052b9fe78 100644
--- a/vendor/github.com/imdario/mergo/merge.go
+++ b/vendor/github.com/imdario/mergo/merge.go
@@ -12,6 +12,18 @@ import (
"reflect"
)
+func hasExportedField(dst reflect.Value) (exported bool) {
+ for i, n := 0, dst.NumField(); i < n; i++ {
+ field := dst.Type().Field(i)
+ if field.Anonymous {
+ exported = exported || hasExportedField(dst.Field(i))
+ } else {
+ exported = exported || len(field.PkgPath) == 0
+ }
+ }
+ return
+}
+
// Traverses recursively both values, assigning src's fields values to dst.
// The map argument tracks comparisons that have already been seen, which allows
// short circuiting on recursive types.
@@ -34,12 +46,22 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, ov
}
switch dst.Kind() {
case reflect.Struct:
- for i, n := 0, dst.NumField(); i < n; i++ {
- if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, overwrite); err != nil {
- return
+ if hasExportedField(dst) {
+ for i, n := 0, dst.NumField(); i < n; i++ {
+ if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, overwrite); err != nil {
+ return
+ }
+ }
+ } else {
+ if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) {
+ dst.Set(src)
}
}
case reflect.Map:
+ if len(src.MapKeys()) == 0 && !src.IsNil() && len(dst.MapKeys()) == 0 {
+ dst.Set(reflect.MakeMap(dst.Type()))
+ return
+ }
for _, key := range src.MapKeys() {
srcElement := src.MapIndex(key)
if !srcElement.IsValid() {
@@ -67,6 +89,10 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, ov
}
}
}
+ if dstElement.IsValid() && reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map {
+ continue
+ }
+
if !isEmptyValue(srcElement) && (overwrite || (!dstElement.IsValid() || isEmptyValue(dst))) {
if dst.IsNil() {
dst.Set(reflect.MakeMap(dst.Type()))
@@ -77,9 +103,27 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, ov
case reflect.Ptr:
fallthrough
case reflect.Interface:
+ if src.Kind() != reflect.Interface {
+ if dst.IsNil() || overwrite {
+ if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
+ dst.Set(src)
+ }
+ } else if src.Kind() == reflect.Ptr {
+ if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, overwrite); err != nil {
+ return
+ }
+ } else if dst.Elem().Type() == src.Type() {
+ if err = deepMerge(dst.Elem(), src, visited, depth+1, overwrite); err != nil {
+ return
+ }
+ } else {
+ return ErrDifferentArgumentsTypes
+ }
+ break
+ }
if src.IsNil() {
break
- } else if dst.IsNil() {
+ } else if dst.IsNil() || overwrite {
if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
dst.Set(src)
}
diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go
index f8a0991ec..79ccdf5cb 100644
--- a/vendor/github.com/imdario/mergo/mergo.go
+++ b/vendor/github.com/imdario/mergo/mergo.go
@@ -45,7 +45,7 @@ func isEmptyValue(v reflect.Value) bool {
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
- case reflect.Interface, reflect.Ptr:
+ case reflect.Interface, reflect.Ptr, reflect.Func:
return v.IsNil()
}
return false
diff --git a/vendor/github.com/imdario/mergo/mergo_test.go b/vendor/github.com/imdario/mergo/mergo_test.go
index dd2651b31..e167c332a 100644
--- a/vendor/github.com/imdario/mergo/mergo_test.go
+++ b/vendor/github.com/imdario/mergo/mergo_test.go
@@ -6,12 +6,11 @@
package mergo
import (
+ "gopkg.in/yaml.v2"
"io/ioutil"
"reflect"
"testing"
"time"
-
- "gopkg.in/yaml.v1"
)
type simpleTest struct {
@@ -24,6 +23,14 @@ type complexTest struct {
ID string
}
+type mapTest struct {
+ M map[int]int
+}
+
+type ifcTest struct {
+ I interface{}
+}
+
type moreComplextText struct {
Ct complexTest
St simpleTest
@@ -244,6 +251,50 @@ func TestSliceStruct(t *testing.T) {
}
}
+func TestEmptyMaps(t *testing.T) {
+ a := mapTest{}
+ b := mapTest{
+ map[int]int{},
+ }
+ if err := Merge(&a, b); err != nil {
+ t.Fail()
+ }
+ if !reflect.DeepEqual(a, b) {
+ t.FailNow()
+ }
+}
+
+func TestEmptyToEmptyMaps(t *testing.T) {
+ a := mapTest{}
+ b := mapTest{}
+ if err := Merge(&a, b); err != nil {
+ t.Fail()
+ }
+ if !reflect.DeepEqual(a, b) {
+ t.FailNow()
+ }
+}
+
+func TestEmptyToNotEmptyMaps(t *testing.T) {
+ a := mapTest{map[int]int{
+ 1: 2,
+ 3: 4,
+ }}
+ aa := mapTest{map[int]int{
+ 1: 2,
+ 3: 4,
+ }}
+ b := mapTest{
+ map[int]int{},
+ }
+ if err := Merge(&a, b); err != nil {
+ t.Fail()
+ }
+ if !reflect.DeepEqual(a, aa) {
+ t.FailNow()
+ }
+}
+
func TestMapsWithOverwrite(t *testing.T) {
m := map[string]simpleTest{
"a": {}, // overwritten by 16
@@ -318,7 +369,8 @@ func TestYAMLMaps(t *testing.T) {
license := loadYAML("testdata/license.yml")
ft := thing["fields"].(map[interface{}]interface{})
fl := license["fields"].(map[interface{}]interface{})
- expectedLength := len(ft) + len(fl)
+ // license has one extra field (site) and another already existing in thing (author) that Mergo won't override.
+ expectedLength := len(ft) + len(fl) - 1
if err := Merge(&license, thing); err != nil {
t.Fatal(err.Error())
}
@@ -393,6 +445,45 @@ func TestSimpleMap(t *testing.T) {
}
}
+func TestIfcMap(t *testing.T) {
+ a := ifcTest{}
+ b := ifcTest{42}
+ if err := Map(&a, b); err != nil {
+ t.FailNow()
+ }
+ if a.I != 42 {
+ t.Fatalf("b not merged in properly: a.I(%d) != b.I(%d)", a.I, b.I)
+ }
+ if !reflect.DeepEqual(a, b) {
+ t.FailNow()
+ }
+}
+
+func TestIfcMapNoOverwrite(t *testing.T) {
+ a := ifcTest{13}
+ b := ifcTest{42}
+ if err := Map(&a, b); err != nil {
+ t.FailNow()
+ }
+ if a.I != 13 {
+ t.Fatalf("a not left alone: a.I(%d) == b.I(%d)", a.I, b.I)
+ }
+}
+
+func TestIfcMapWithOverwrite(t *testing.T) {
+ a := ifcTest{13}
+ b := ifcTest{42}
+ if err := MapWithOverwrite(&a, b); err != nil {
+ t.FailNow()
+ }
+ if a.I != 42 {
+ t.Fatalf("b not merged in properly: a.I(%d) != b.I(%d)", a.I, b.I)
+ }
+ if !reflect.DeepEqual(a, b) {
+ t.FailNow()
+ }
+}
+
type pointerMapTest struct {
A int
hidden int
@@ -434,6 +525,29 @@ func TestBackAndForth(t *testing.T) {
}
}
+func TestEmbeddedPointerUnpacking(t *testing.T) {
+ tests := []struct{ input pointerMapTest }{
+ {pointerMapTest{42, 1, nil}},
+ {pointerMapTest{42, 1, &simpleTest{66}}},
+ }
+ newValue := 77
+ m := map[string]interface{}{
+ "b": map[string]interface{}{
+ "value": newValue,
+ },
+ }
+ for _, test := range tests {
+ pt := test.input
+ if err := MapWithOverwrite(&pt, m); err != nil {
+ t.FailNow()
+ }
+ if pt.B.Value != newValue {
+ t.Fatalf("pt not mapped properly: pt.A.Value(%d) != m[`b`][`value`](%d)", pt.B.Value, newValue)
+ }
+
+ }
+}
+
type structWithTimePointer struct {
Birth *time.Time
}
@@ -523,3 +637,26 @@ func TestUnexportedProperty(t *testing.T) {
}()
Merge(&a, b)
}
+
+type structWithBoolPointer struct {
+ C *bool
+}
+
+func TestBooleanPointer(t *testing.T) {
+ bt, bf := true, false
+ src := structWithBoolPointer{
+ &bt,
+ }
+ dst := structWithBoolPointer{
+ &bf,
+ }
+ if err := Merge(&dst, src); err != nil {
+ t.FailNow()
+ }
+ if dst.C == src.C {
+ t.Fatalf("dst.C should be a different pointer than src.C")
+ }
+ if *dst.C != *src.C {
+ t.Fatalf("dst.C should be true")
+ }
+}
diff --git a/vendor/github.com/juju/ratelimit/ratelimit.go b/vendor/github.com/juju/ratelimit/ratelimit.go
index 1c3f25b2e..bd9ef1038 100644
--- a/vendor/github.com/juju/ratelimit/ratelimit.go
+++ b/vendor/github.com/juju/ratelimit/ratelimit.go
@@ -14,43 +14,62 @@ import (
"time"
)
+// The algorithm that this implementation uses does computational work
+// only when tokens are removed from the bucket, and that work completes
+// in short, bounded-constant time (Bucket.Wait benchmarks at 175ns on
+// my laptop).
+//
+// Time is measured in equal measured ticks, a given interval
+// (fillInterval) apart. On each tick a number of tokens (quantum) are
+// added to the bucket.
+//
+// When any of the methods are called the bucket updates the number of
+// tokens that are in the bucket, and it records the current tick
+// number too. Note that it doesn't record the current time - by
+// keeping things in units of whole ticks, it's easy to dish out tokens
+// at exactly the right intervals as measured from the start time.
+//
+// This allows us to calculate the number of tokens that will be
+// available at some time in the future with a few simple arithmetic
+// operations.
+//
+// The main reason for being able to transfer multiple tokens on each tick
+// is so that we can represent rates greater than 1e9 (the resolution of the Go
+// time package) tokens per second, but it's also useful because
+// it means we can easily represent situations like "a person gets
+// five tokens an hour, replenished on the hour".
+
// Bucket represents a token bucket that fills at a predetermined rate.
// Methods on Bucket may be called concurrently.
type Bucket struct {
- startTime time.Time
- capacity int64
- quantum int64
+ clock Clock
+
+ // startTime holds the moment when the bucket was
+ // first created and ticks began.
+ startTime time.Time
+
+ // capacity holds the overall capacity of the bucket.
+ capacity int64
+
+ // quantum holds how many tokens are added on
+ // each tick.
+ quantum int64
+
+ // fillInterval holds the interval between each tick.
fillInterval time.Duration
- clock Clock
- // The mutex guards the fields following it.
+ // mu guards the fields below it.
mu sync.Mutex
- // avail holds the number of available tokens
- // in the bucket, as of availTick ticks from startTime.
+ // availableTokens holds the number of available
+ // tokens as of the associated latestTick.
// It will be negative when there are consumers
// waiting for tokens.
- avail int64
- availTick int64
-}
+ availableTokens int64
-// Clock is used to inject testable fakes.
-type Clock interface {
- Now() time.Time
- Sleep(d time.Duration)
-}
-
-// realClock implements Clock in terms of standard time functions.
-type realClock struct{}
-
-// Now is identical to time.Now.
-func (realClock) Now() time.Time {
- return time.Now()
-}
-
-// Sleep is identical to time.Sleep.
-func (realClock) Sleep(d time.Duration) {
- time.Sleep(d)
+ // latestTick holds the latest tick for which
+ // we know the number of tokens in the bucket.
+ latestTick int64
}
// NewBucket returns a new token bucket that fills at the
@@ -58,7 +77,7 @@ func (realClock) Sleep(d time.Duration) {
// maximum capacity. Both arguments must be
// positive. The bucket is initially full.
func NewBucket(fillInterval time.Duration, capacity int64) *Bucket {
- return NewBucketWithClock(fillInterval, capacity, realClock{})
+ return NewBucketWithClock(fillInterval, capacity, nil)
}
// NewBucketWithClock is identical to NewBucket but injects a testable clock
@@ -77,18 +96,22 @@ const rateMargin = 0.01
// at high rates, the actual rate may be up to 1% different from the
// specified rate.
func NewBucketWithRate(rate float64, capacity int64) *Bucket {
- return NewBucketWithRateAndClock(rate, capacity, realClock{})
+ return NewBucketWithRateAndClock(rate, capacity, nil)
}
// NewBucketWithRateAndClock is identical to NewBucketWithRate but injects a
// testable clock interface.
func NewBucketWithRateAndClock(rate float64, capacity int64, clock Clock) *Bucket {
+ // Use the same bucket each time through the loop
+ // to save allocations.
+ tb := NewBucketWithQuantumAndClock(1, capacity, 1, clock)
for quantum := int64(1); quantum < 1<<50; quantum = nextQuantum(quantum) {
fillInterval := time.Duration(1e9 * float64(quantum) / rate)
if fillInterval <= 0 {
continue
}
- tb := NewBucketWithQuantumAndClock(fillInterval, capacity, quantum, clock)
+ tb.fillInterval = fillInterval
+ tb.quantum = quantum
if diff := math.Abs(tb.Rate() - rate); diff/rate <= rateMargin {
return tb
}
@@ -111,12 +134,16 @@ func nextQuantum(q int64) int64 {
// the specification of the quantum size - quantum tokens
// are added every fillInterval.
func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *Bucket {
- return NewBucketWithQuantumAndClock(fillInterval, capacity, quantum, realClock{})
+ return NewBucketWithQuantumAndClock(fillInterval, capacity, quantum, nil)
}
-// NewBucketWithQuantumAndClock is identical to NewBucketWithQuantum but injects
-// a testable clock interface.
+// NewBucketWithQuantumAndClock is like NewBucketWithQuantum, but
+// also has a clock argument that allows clients to fake the passing
+// of time. If clock is nil, the system clock will be used.
func NewBucketWithQuantumAndClock(fillInterval time.Duration, capacity, quantum int64, clock Clock) *Bucket {
+ if clock == nil {
+ clock = realClock{}
+ }
if fillInterval <= 0 {
panic("token bucket fill interval is not > 0")
}
@@ -127,12 +154,13 @@ func NewBucketWithQuantumAndClock(fillInterval time.Duration, capacity, quantum
panic("token bucket quantum is not > 0")
}
return &Bucket{
- clock: clock,
- startTime: clock.Now(),
- capacity: capacity,
- quantum: quantum,
- avail: capacity,
- fillInterval: fillInterval,
+ clock: clock,
+ startTime: clock.Now(),
+ latestTick: 0,
+ fillInterval: fillInterval,
+ capacity: capacity,
+ quantum: quantum,
+ availableTokens: capacity,
}
}
@@ -166,6 +194,8 @@ const infinityDuration time.Duration = 0x7fffffffffffffff
// Note that if the request is irrevocable - there is no way to return
// tokens to the bucket once this method commits us to taking them.
func (tb *Bucket) Take(count int64) time.Duration {
+ tb.mu.Lock()
+ defer tb.mu.Unlock()
d, _ := tb.take(tb.clock.Now(), count, infinityDuration)
return d
}
@@ -180,6 +210,8 @@ func (tb *Bucket) Take(count int64) time.Duration {
// wait until the tokens are actually available, and reports
// true.
func (tb *Bucket) TakeMaxDuration(count int64, maxWait time.Duration) (time.Duration, bool) {
+ tb.mu.Lock()
+ defer tb.mu.Unlock()
return tb.take(tb.clock.Now(), count, maxWait)
}
@@ -187,6 +219,8 @@ func (tb *Bucket) TakeMaxDuration(count int64, maxWait time.Duration) (time.Dura
// bucket. It returns the number of tokens removed, or zero if there are
// no available tokens. It does not block.
func (tb *Bucket) TakeAvailable(count int64) int64 {
+ tb.mu.Lock()
+ defer tb.mu.Unlock()
return tb.takeAvailable(tb.clock.Now(), count)
}
@@ -196,17 +230,14 @@ func (tb *Bucket) takeAvailable(now time.Time, count int64) int64 {
if count <= 0 {
return 0
}
- tb.mu.Lock()
- defer tb.mu.Unlock()
-
- tb.adjust(now)
- if tb.avail <= 0 {
+ tb.adjustavailableTokens(tb.currentTick(now))
+ if tb.availableTokens <= 0 {
return 0
}
- if count > tb.avail {
- count = tb.avail
+ if count > tb.availableTokens {
+ count = tb.availableTokens
}
- tb.avail -= count
+ tb.availableTokens -= count
return count
}
@@ -225,8 +256,8 @@ func (tb *Bucket) Available() int64 {
func (tb *Bucket) available(now time.Time) int64 {
tb.mu.Lock()
defer tb.mu.Unlock()
- tb.adjust(now)
- return tb.avail
+ tb.adjustavailableTokens(tb.currentTick(now))
+ return tb.availableTokens
}
// Capacity returns the capacity that the bucket was created with.
@@ -245,40 +276,69 @@ func (tb *Bucket) take(now time.Time, count int64, maxWait time.Duration) (time.
if count <= 0 {
return 0, true
}
- tb.mu.Lock()
- defer tb.mu.Unlock()
- currentTick := tb.adjust(now)
- avail := tb.avail - count
+ tick := tb.currentTick(now)
+ tb.adjustavailableTokens(tick)
+ avail := tb.availableTokens - count
if avail >= 0 {
- tb.avail = avail
+ tb.availableTokens = avail
return 0, true
}
// Round up the missing tokens to the nearest multiple
// of quantum - the tokens won't be available until
// that tick.
- endTick := currentTick + (-avail+tb.quantum-1)/tb.quantum
+
+ // endTick holds the tick when all the requested tokens will
+ // become available.
+ endTick := tick + (-avail+tb.quantum-1)/tb.quantum
endTime := tb.startTime.Add(time.Duration(endTick) * tb.fillInterval)
waitTime := endTime.Sub(now)
if waitTime > maxWait {
return 0, false
}
- tb.avail = avail
+ tb.availableTokens = avail
return waitTime, true
}
-// adjust adjusts the current bucket capacity based on the current time.
-// It returns the current tick.
-func (tb *Bucket) adjust(now time.Time) (currentTick int64) {
- currentTick = int64(now.Sub(tb.startTime) / tb.fillInterval)
+// currentTick returns the current time tick, measured
+// from tb.startTime.
+func (tb *Bucket) currentTick(now time.Time) int64 {
+ return int64(now.Sub(tb.startTime) / tb.fillInterval)
+}
- if tb.avail >= tb.capacity {
+// adjustavailableTokens adjusts the current number of tokens
+// available in the bucket at the given time, which must
+// be in the future (positive) with respect to tb.latestTick.
+func (tb *Bucket) adjustavailableTokens(tick int64) {
+ if tb.availableTokens >= tb.capacity {
return
}
- tb.avail += (currentTick - tb.availTick) * tb.quantum
- if tb.avail > tb.capacity {
- tb.avail = tb.capacity
+ tb.availableTokens += (tick - tb.latestTick) * tb.quantum
+ if tb.availableTokens > tb.capacity {
+ tb.availableTokens = tb.capacity
}
- tb.availTick = currentTick
+ tb.latestTick = tick
return
}
+
+// Clock represents the passage of time in a way that
+// can be faked out for tests.
+type Clock interface {
+ // Now returns the current time.
+ Now() time.Time
+ // Sleep sleeps for at least the given duration.
+ Sleep(d time.Duration)
+}
+
+// realClock implements Clock in terms of standard time functions.
+type realClock struct{}
+
+// Now implements Clock.Now by calling time.Now.
+func (realClock) Now() time.Time {
+ return time.Now()
+}
+
+// Sleep implements Clock.Sleep by calling time.Sleep.
+func (realClock) Sleep(d time.Duration) {
+ time.Sleep(d)
+}
diff --git a/vendor/github.com/juju/ratelimit/ratelimit_test.go b/vendor/github.com/juju/ratelimit/ratelimit_test.go
index 62d88ded0..3de0cad6e 100644
--- a/vendor/github.com/juju/ratelimit/ratelimit_test.go
+++ b/vendor/github.com/juju/ratelimit/ratelimit_test.go
@@ -344,7 +344,7 @@ func checkRate(c *gc.C, rate float64) {
}
}
-func (rateLimitSuite) TestNewWithRate(c *gc.C) {
+func (rateLimitSuite) NewBucketWithRate(c *gc.C) {
for rate := float64(1); rate < 1e6; rate += 7 {
checkRate(c, rate)
}
@@ -357,6 +357,7 @@ func (rateLimitSuite) TestNewWithRate(c *gc.C) {
0.9e8,
3e12,
4e18,
+ float64(1<<63 - 1),
} {
checkRate(c, rate)
checkRate(c, rate/3)
@@ -387,3 +388,9 @@ func BenchmarkWait(b *testing.B) {
tb.Wait(1)
}
}
+
+func BenchmarkNewBucket(b *testing.B) {
+ for i := b.N - 1; i >= 0; i-- {
+ NewBucketWithRate(4e18, 1<<62)
+ }
+}
diff --git a/vendor/github.com/mailru/easyjson/.travis.yml b/vendor/github.com/mailru/easyjson/.travis.yml
index 3e5ac1320..884f8bbdf 100644
--- a/vendor/github.com/mailru/easyjson/.travis.yml
+++ b/vendor/github.com/mailru/easyjson/.travis.yml
@@ -1,8 +1,9 @@
language: go
go:
- - tip
+ - tip
install:
- - go get github.com/ugorji/go/codec
- - go get github.com/pquerna/ffjson/fflib/v1
- - go get github.com/golang/lint/golint
+ - go get github.com/ugorji/go/codec
+ - go get github.com/pquerna/ffjson/fflib/v1
+ - go get github.com/json-iterator/go
+ - go get github.com/golang/lint/golint
diff --git a/vendor/github.com/mailru/easyjson/Makefile b/vendor/github.com/mailru/easyjson/Makefile
index 8e720a084..f877ab269 100644
--- a/vendor/github.com/mailru/easyjson/Makefile
+++ b/vendor/github.com/mailru/easyjson/Makefile
@@ -4,7 +4,7 @@ export GOPATH
all: test
-.root/src/$(PKG):
+.root/src/$(PKG):
mkdir -p $@
for i in $$PWD/* ; do ln -s $$i $@/`basename $$i` ; done
@@ -12,6 +12,7 @@ root: .root/src/$(PKG)
clean:
rm -rf .root
+ rm -rf tests/*_easyjson.go
build:
go build -i -o .root/bin/easyjson $(PKG)/easyjson
@@ -45,6 +46,7 @@ test: generate root
bench-other: generate root
@go test -benchmem -bench . $(PKG)/benchmark
@go test -benchmem -tags use_ffjson -bench . $(PKG)/benchmark
+ @go test -benchmem -tags use_jsoniter -bench . $(PKG)/benchmark
@go test -benchmem -tags use_codec -bench . $(PKG)/benchmark
bench-python:
diff --git a/vendor/github.com/mailru/easyjson/README.md b/vendor/github.com/mailru/easyjson/README.md
index d19751e0e..9366e3f71 100644
--- a/vendor/github.com/mailru/easyjson/README.md
+++ b/vendor/github.com/mailru/easyjson/README.md
@@ -56,7 +56,7 @@ Usage of easyjson:
```
Using `-all` will generate marshalers/unmarshalers for all Go structs in the
-file. If `-all` is not provided, then only those structs whose preceeding
+file. If `-all` is not provided, then only those structs whose preceding
comment starts with `easyjson:json` will have marshalers/unmarshalers
generated. For example:
diff --git a/vendor/github.com/mailru/easyjson/jlexer/lexer.go b/vendor/github.com/mailru/easyjson/jlexer/lexer.go
index e81f1031b..e5558ae39 100644
--- a/vendor/github.com/mailru/easyjson/jlexer/lexer.go
+++ b/vendor/github.com/mailru/easyjson/jlexer/lexer.go
@@ -6,6 +6,7 @@ package jlexer
import (
"encoding/base64"
+ "encoding/json"
"errors"
"fmt"
"io"
@@ -903,6 +904,10 @@ func (r *Lexer) UintStr() uint {
return uint(r.Uint64Str())
}
+func (r *Lexer) UintptrStr() uintptr {
+ return uintptr(r.Uint64Str())
+}
+
func (r *Lexer) Int8Str() int8 {
s, b := r.unsafeString()
if !r.Ok() {
@@ -1043,6 +1048,28 @@ func (r *Lexer) GetNonFatalErrors() []*LexerError {
return r.multipleErrors
}
+// JsonNumber fetches a json.Number, as in the 'encoding/json' package.
+// Int and float number tokens, and strings containing them, are valid values.
+func (r *Lexer) JsonNumber() json.Number {
+ if r.token.kind == tokenUndef && r.Ok() {
+ r.FetchToken()
+ }
+ if !r.Ok() {
+ r.errInvalidToken("json.Number")
+ return json.Number("0")
+ }
+
+ switch r.token.kind {
+ case tokenString:
+ return json.Number(r.String())
+ case tokenNumber:
+ return json.Number(r.Raw())
+ default:
+ r.errSyntax()
+ return json.Number("0")
+ }
+}
+
// Interface fetches an interface{} analogous to the 'encoding/json' package.
func (r *Lexer) Interface() interface{} {
if r.token.kind == tokenUndef && r.Ok() {
diff --git a/vendor/github.com/mailru/easyjson/jlexer/lexer_test.go b/vendor/github.com/mailru/easyjson/jlexer/lexer_test.go
index b8a649898..4ce4abe6a 100644
--- a/vendor/github.com/mailru/easyjson/jlexer/lexer_test.go
+++ b/vendor/github.com/mailru/easyjson/jlexer/lexer_test.go
@@ -2,6 +2,7 @@ package jlexer
import (
"bytes"
+ "encoding/json"
"reflect"
"testing"
)
@@ -249,3 +250,62 @@ func TestConsumed(t *testing.T) {
}
}
}
+
+func TestJsonNumber(t *testing.T) {
+ for i, test := range []struct {
+ toParse string
+ want json.Number
+ wantLexerError bool
+ wantValue interface{}
+ wantValueError bool
+ }{
+ {toParse: `10`, want: json.Number("10"), wantValue: int64(10)},
+ {toParse: `0`, want: json.Number("0"), wantValue: int64(0)},
+ {toParse: `0.12`, want: json.Number("0.12"), wantValue: 0.12},
+ {toParse: `25E-4`, want: json.Number("25E-4"), wantValue: 25E-4},
+
+ {toParse: `"10"`, want: json.Number("10"), wantValue: int64(10)},
+ {toParse: `"0"`, want: json.Number("0"), wantValue: int64(0)},
+ {toParse: `"0.12"`, want: json.Number("0.12"), wantValue: 0.12},
+ {toParse: `"25E-4"`, want: json.Number("25E-4"), wantValue: 25E-4},
+
+ {toParse: `"a""`, wantValueError: true},
+
+ {toParse: `[1]`, wantLexerError: true},
+ {toParse: `{}`, wantLexerError: true},
+ {toParse: `a`, wantLexerError: true},
+ } {
+ l := Lexer{Data: []byte(test.toParse)}
+
+ got := l.JsonNumber()
+ if got != test.want && !test.wantLexerError && !test.wantValueError {
+ t.Errorf("[%d, %q] JsonNumber() = %v; want %v", i, test.toParse, got, test.want)
+ }
+
+ err := l.Error()
+ if err != nil && !test.wantLexerError {
+ t.Errorf("[%d, %q] JsonNumber() lexer error: %v", i, test.toParse, err)
+ } else if err == nil && test.wantLexerError {
+ t.Errorf("[%d, %q] JsonNumber() ok; want lexer error", i, test.toParse)
+ }
+
+ var valueErr error
+ var gotValue interface{}
+ switch test.wantValue.(type) {
+ case float64:
+ gotValue, valueErr = got.Float64()
+ default:
+ gotValue, valueErr = got.Int64()
+ }
+
+ if !reflect.DeepEqual(gotValue, test.wantValue) && !test.wantLexerError && !test.wantValueError {
+ t.Errorf("[%d, %q] JsonNumber() = %v; want %v", i, test.toParse, gotValue, test.wantValue)
+ }
+
+ if valueErr != nil && !test.wantValueError {
+ t.Errorf("[%d, %q] JsonNumber() value error: %v", i, test.toParse, err)
+ } else if valueErr == nil && test.wantValueError {
+ t.Errorf("[%d, %q] JsonNumber() ok; want value error", i, test.toParse)
+ }
+ }
+}
diff --git a/vendor/github.com/mailru/easyjson/jwriter/writer.go b/vendor/github.com/mailru/easyjson/jwriter/writer.go
index 7b55293a0..250920d85 100644
--- a/vendor/github.com/mailru/easyjson/jwriter/writer.go
+++ b/vendor/github.com/mailru/easyjson/jwriter/writer.go
@@ -196,6 +196,13 @@ func (w *Writer) Uint64Str(n uint64) {
w.Buffer.Buf = append(w.Buffer.Buf, '"')
}
+func (w *Writer) UintptrStr(n uintptr) {
+ w.Buffer.EnsureSpace(20)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+ w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
func (w *Writer) Int8Str(n int8) {
w.Buffer.EnsureSpace(4)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
index 6ec5c3335..30a9957c6 100644
--- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go
+++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
@@ -686,7 +686,11 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value)
// Compile the list of all the fields that we're going to be decoding
// from all the structs.
- fields := make(map[*reflect.StructField]reflect.Value)
+ type field struct {
+ field reflect.StructField
+ val reflect.Value
+ }
+ fields := []field{}
for len(structs) > 0 {
structVal := structs[0]
structs = structs[1:]
@@ -718,14 +722,16 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value)
}
// Normal struct field, store it away
- fields[&fieldType] = structVal.Field(i)
+ fields = append(fields, field{fieldType, structVal.Field(i)})
}
}
- for fieldType, field := range fields {
- fieldName := fieldType.Name
+ // for fieldType, field := range fields {
+ for _, f := range fields {
+ field, fieldValue := f.field, f.val
+ fieldName := field.Name
- tagValue := fieldType.Tag.Get(d.config.TagName)
+ tagValue := field.Tag.Get(d.config.TagName)
tagValue = strings.SplitN(tagValue, ",", 2)[0]
if tagValue != "" {
fieldName = tagValue
@@ -760,14 +766,14 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value)
// Delete the key we're using from the unused map so we stop tracking
delete(dataValKeysUnused, rawMapKey.Interface())
- if !field.IsValid() {
+ if !fieldValue.IsValid() {
// This should never happen
panic("field is not valid")
}
// If we can't set the field, then it is unexported or something,
// and we just continue onwards.
- if !field.CanSet() {
+ if !fieldValue.CanSet() {
continue
}
@@ -777,7 +783,7 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value)
fieldName = fmt.Sprintf("%s.%s", name, fieldName)
}
- if err := d.decode(fieldName, rawMapVal.Interface(), field); err != nil {
+ if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil {
errors = appendErrors(errors, err)
}
}
diff --git a/vendor/github.com/pierrec/lz4/.gitignore b/vendor/github.com/pierrec/lz4/.gitignore
new file mode 100644
index 000000000..c2bb6e4af
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/.gitignore
@@ -0,0 +1,31 @@
+# Created by https://www.gitignore.io/api/macos
+
+### macOS ###
+*.DS_Store
+.AppleDouble
+.LSOverride
+
+# Icon must end with two \r
+Icon
+
+
+# Thumbnails
+._*
+
+# Files that might appear in the root of a volume
+.DocumentRevisions-V100
+.fseventsd
+.Spotlight-V100
+.TemporaryItems
+.Trashes
+.VolumeIcon.icns
+.com.apple.timemachine.donotpresent
+
+# Directories potentially created on remote AFP share
+.AppleDB
+.AppleDesktop
+Network Trash Folder
+Temporary Items
+.apdisk
+
+# End of https://www.gitignore.io/api/macos
diff --git a/vendor/github.com/pierrec/lz4/.travis.yml b/vendor/github.com/pierrec/lz4/.travis.yml
index 5fd623633..78be21cc8 100644
--- a/vendor/github.com/pierrec/lz4/.travis.yml
+++ b/vendor/github.com/pierrec/lz4/.travis.yml
@@ -1,9 +1,8 @@
language: go
go:
- - 1.4
- - 1.5
+ - 1.x
script:
- - go test -cpu=2
- - go test -cpu=2 -race \ No newline at end of file
+ - go test -v -cpu=2
+ - go test -v -cpu=2 -race \ No newline at end of file
diff --git a/vendor/github.com/pierrec/lz4/block.go b/vendor/github.com/pierrec/lz4/block.go
index 6884bccdf..145eec270 100644
--- a/vendor/github.com/pierrec/lz4/block.go
+++ b/vendor/github.com/pierrec/lz4/block.go
@@ -3,7 +3,6 @@ package lz4
import (
"encoding/binary"
"errors"
- "unsafe"
)
// block represents a frame data block.
@@ -111,11 +110,6 @@ func UncompressBlock(src, dst []byte, di int) (int, error) {
}
}
-type hashEntry struct {
- generation uint
- value int
-}
-
// CompressBlock compresses the source buffer starting at soffet into the destination one.
// This is the fast version of LZ4 compression and also the default one.
//
@@ -123,27 +117,6 @@ type hashEntry struct {
//
// An error is returned if the destination buffer is too small.
func CompressBlock(src, dst []byte, soffset int) (int, error) {
- var hashTable [hashTableSize]hashEntry
- return compressGenerationalBlock(src, dst, soffset, 0, hashTable[:])
-}
-
-// getUint32 is a despicably evil function (well, for Go!) that takes advantage
-// of the machine's byte order to save some operations. This may look
-// inefficient but it is significantly faster on littleEndian machines,
-// which include x84, amd64, and some ARM processors.
-func getUint32(b []byte) uint32 {
- _ = b[3]
- if isLittleEndian {
- return *(*uint32)(unsafe.Pointer(&b))
- }
-
- return uint32(b[0]) |
- uint32(b[1])<<8 |
- uint32(b[2])<<16 |
- uint32(b[3])<<24
-}
-
-func compressGenerationalBlock(src, dst []byte, soffset int, generation uint, hashTable []hashEntry) (int, error) {
sn, dn := len(src)-mfLimit, len(dst)
if sn <= 0 || dn == 0 || soffset >= sn {
return 0, nil
@@ -152,28 +125,26 @@ func compressGenerationalBlock(src, dst []byte, soffset int, generation uint, ha
// fast scan strategy:
// we only need a hash table to store the last sequences (4 bytes)
+ var hashTable [1 << hashLog]int
var hashShift = uint((minMatch * 8) - hashLog)
// Initialise the hash table with the first 64Kb of the input buffer
// (used when compressing dependent blocks)
for si < soffset {
- h := getUint32(src[si:]) * hasher >> hashShift
+ h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift
si++
- hashTable[h] = hashEntry{generation, si}
+ hashTable[h] = si
}
anchor := si
fma := 1 << skipStrength
for si < sn-minMatch {
// hash the next 4 bytes (sequence)...
- h := getUint32(src[si:]) * hasher >> hashShift
- if hashTable[h].generation != generation {
- hashTable[h] = hashEntry{generation, 0}
- }
+ h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift
// -1 to separate existing entries from new ones
- ref := hashTable[h].value - 1
+ ref := hashTable[h] - 1
// ...and store the position of the hash in the hash table (+1 to compensate the -1 upon saving)
- hashTable[h].value = si + 1
+ hashTable[h] = si + 1
// no need to check the last 3 bytes in the first literal 4 bytes as
// this guarantees that the next match, if any, is compressed with
// a lower size, since to have some compression we must have:
diff --git a/vendor/github.com/pierrec/lz4/lz4.go b/vendor/github.com/pierrec/lz4/lz4.go
index 46389243b..ddb82f66f 100644
--- a/vendor/github.com/pierrec/lz4/lz4.go
+++ b/vendor/github.com/pierrec/lz4/lz4.go
@@ -20,7 +20,6 @@ package lz4
import (
"hash"
"sync"
- "unsafe"
"github.com/pierrec/xxHash/xxHash32"
)
@@ -65,18 +64,6 @@ func init() {
}
}
-var isLittleEndian = getIsLittleEndian()
-
-func getIsLittleEndian() (ret bool) {
- var i int = 0x1
- bs := (*[1]byte)(unsafe.Pointer(&i))
- if bs[0] == 0 {
- return false
- }
-
- return true
-}
-
// Header describes the various flags that can be set on a Writer or obtained from a Reader.
// The default values match those of the LZ4 frame format definition (http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html).
//
diff --git a/vendor/github.com/pierrec/lz4/lz4_test.go b/vendor/github.com/pierrec/lz4/lz4_test.go
index 2eb5b6bc1..9683b94b4 100644
--- a/vendor/github.com/pierrec/lz4/lz4_test.go
+++ b/vendor/github.com/pierrec/lz4/lz4_test.go
@@ -8,6 +8,7 @@ import (
"io"
"io/ioutil"
"math/big"
+ "os"
"reflect"
"testing"
@@ -261,6 +262,25 @@ func TestBlock(t *testing.T) {
}
}
+func TestBlockCompression(t *testing.T) {
+ input := make([]byte, 64*1024)
+
+ for i := 0; i < 64*1024; i += 1 {
+ input[i] = byte(i & 0x1)
+ }
+ output := make([]byte, 64*1024)
+
+ c, err := lz4.CompressBlock(input, output, 0)
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if c == 0 {
+ t.Fatal("cannot compress compressible data")
+ }
+}
+
func BenchmarkUncompressBlock(b *testing.B) {
d := make([]byte, len(lorem))
z := make([]byte, len(lorem))
@@ -395,57 +415,60 @@ func TestReset(t *testing.T) {
func TestFrame(t *testing.T) {
for _, tdata := range testDataItems {
data := tdata.data
- // test various options
- for _, headerItem := range testHeaderItems {
- tag := tdata.label + ": " + headerItem.label
- rw := bytes.NewBuffer(nil)
-
- // Set all options to non default values and compress
- w := lz4.NewWriter(rw)
- w.Header = headerItem.header
+ t.Run(tdata.label, func(t *testing.T) {
+ t.Parallel()
+ // test various options
+ for _, headerItem := range testHeaderItems {
+ tag := tdata.label + ": " + headerItem.label
+ rw := bytes.NewBuffer(nil)
+
+ // Set all options to non default values and compress
+ w := lz4.NewWriter(rw)
+ w.Header = headerItem.header
+
+ n, err := w.Write(data)
+ if err != nil {
+ t.Errorf("%s: Write(): unexpected error: %v", tag, err)
+ t.FailNow()
+ }
+ if n != len(data) {
+ t.Errorf("%s: Write(): expected %d bytes written, got %d", tag, len(data), n)
+ t.FailNow()
+ }
+ if err = w.Close(); err != nil {
+ t.Errorf("%s: Close(): unexpected error: %v", tag, err)
+ t.FailNow()
+ }
- n, err := w.Write(data)
- if err != nil {
- t.Errorf("%s: Write(): unexpected error: %v", tag, err)
- t.FailNow()
- }
- if n != len(data) {
- t.Errorf("%s: Write(): expected %d bytes written, got %d", tag, len(data), n)
- t.FailNow()
- }
- if err = w.Close(); err != nil {
- t.Errorf("%s: Close(): unexpected error: %v", tag, err)
- t.FailNow()
- }
+ // Decompress
+ r := lz4.NewReader(rw)
+ n, err = r.Read(nil)
+ if err != nil {
+ t.Errorf("%s: Read(): unexpected error: %v", tag, err)
+ t.FailNow()
+ }
+ if n != 0 {
+ t.Errorf("%s: Read(): expected 0 bytes read, got %d", tag, n)
+ }
- // Decompress
- r := lz4.NewReader(rw)
- n, err = r.Read(nil)
- if err != nil {
- t.Errorf("%s: Read(): unexpected error: %v", tag, err)
- t.FailNow()
- }
- if n != 0 {
- t.Errorf("%s: Read(): expected 0 bytes read, got %d", tag, n)
- }
+ buf := make([]byte, len(data))
+ n, err = r.Read(buf)
+ if err != nil && err != io.EOF {
+ t.Errorf("%s: Read(): unexpected error: %v", tag, err)
+ t.FailNow()
+ }
+ if n != len(data) {
+ t.Errorf("%s: Read(): expected %d bytes read, got %d", tag, len(data), n)
+ }
+ buf = buf[:n]
+ if !bytes.Equal(buf, data) {
+ t.Errorf("%s: decompress(compress(data)) != data (%d/%d)", tag, len(buf), len(data))
+ t.FailNow()
+ }
- buf := make([]byte, len(data))
- n, err = r.Read(buf)
- if err != nil && err != io.EOF {
- t.Errorf("%s: Read(): unexpected error: %v", tag, err)
- t.FailNow()
+ compareHeaders(w.Header, r.Header, t)
}
- if n != len(data) {
- t.Errorf("%s: Read(): expected %d bytes read, got %d", tag, len(data), n)
- }
- buf = buf[:n]
- if !bytes.Equal(buf, data) {
- t.Errorf("%s: decompress(compress(data)) != data (%d/%d)", tag, len(buf), len(data))
- t.FailNow()
- }
-
- compareHeaders(w.Header, r.Header, t)
- }
+ })
}
}
@@ -454,76 +477,82 @@ func TestReadFromWriteTo(t *testing.T) {
for _, tdata := range testDataItems {
data := tdata.data
- // test various options
- for _, headerItem := range testHeaderItems {
- tag := "ReadFromWriteTo: " + tdata.label + ": " + headerItem.label
- dbuf := bytes.NewBuffer(data)
-
- zbuf := bytes.NewBuffer(nil)
- w := lz4.NewWriter(zbuf)
- w.Header = headerItem.header
- if _, err := w.ReadFrom(dbuf); err != nil {
- t.Errorf("%s: unexpected error: %s", tag, err)
- t.FailNow()
- }
+ t.Run(tdata.label, func(t *testing.T) {
+ t.Parallel()
+ // test various options
+ for _, headerItem := range testHeaderItems {
+ tag := "ReadFromWriteTo: " + tdata.label + ": " + headerItem.label
+ dbuf := bytes.NewBuffer(data)
+
+ zbuf := bytes.NewBuffer(nil)
+ w := lz4.NewWriter(zbuf)
+ w.Header = headerItem.header
+ if _, err := w.ReadFrom(dbuf); err != nil {
+ t.Errorf("%s: unexpected error: %s", tag, err)
+ t.FailNow()
+ }
- if err := w.Close(); err != nil {
- t.Errorf("%s: unexpected error: %s", tag, err)
- t.FailNow()
- }
+ if err := w.Close(); err != nil {
+ t.Errorf("%s: unexpected error: %s", tag, err)
+ t.FailNow()
+ }
- buf := bytes.NewBuffer(nil)
- r := lz4.NewReader(zbuf)
- if _, err := r.WriteTo(buf); err != nil {
- t.Errorf("%s: unexpected error: %s", tag, err)
- t.FailNow()
- }
+ buf := bytes.NewBuffer(nil)
+ r := lz4.NewReader(zbuf)
+ if _, err := r.WriteTo(buf); err != nil {
+ t.Errorf("%s: unexpected error: %s", tag, err)
+ t.FailNow()
+ }
- if !bytes.Equal(buf.Bytes(), data) {
- t.Errorf("%s: decompress(compress(data)) != data (%d/%d)", tag, buf.Len(), len(data))
- t.FailNow()
+ if !bytes.Equal(buf.Bytes(), data) {
+ t.Errorf("%s: decompress(compress(data)) != data (%d/%d)", tag, buf.Len(), len(data))
+ t.FailNow()
+ }
}
- }
+ })
}
}
// TestCopy will use io.Copy and avoid using Reader.WriteTo() and Writer.ReadFrom().
func TestCopy(t *testing.T) {
- w := lz4.NewWriter(nil)
- r := lz4.NewReader(nil)
for _, tdata := range testDataItems {
data := tdata.data
+ t.Run(tdata.label, func(t *testing.T) {
+ t.Parallel()
+
+ w := lz4.NewWriter(nil)
+ r := lz4.NewReader(nil)
+ // test various options
+ for _, headerItem := range testHeaderItems {
+ tag := "io.Copy: " + tdata.label + ": " + headerItem.label
+ dbuf := &testBuffer{bytes.NewBuffer(data)}
+
+ zbuf := bytes.NewBuffer(nil)
+ w.Reset(zbuf)
+ w.Header = headerItem.header
+ if _, err := io.Copy(w, dbuf); err != nil {
+ t.Errorf("%s: unexpected error: %s", tag, err)
+ t.FailNow()
+ }
- // test various options
- for _, headerItem := range testHeaderItems {
- tag := "io.Copy: " + tdata.label + ": " + headerItem.label
- dbuf := &testBuffer{bytes.NewBuffer(data)}
-
- zbuf := bytes.NewBuffer(nil)
- w.Reset(zbuf)
- w.Header = headerItem.header
- if _, err := io.Copy(w, dbuf); err != nil {
- t.Errorf("%s: unexpected error: %s", tag, err)
- t.FailNow()
- }
-
- if err := w.Close(); err != nil {
- t.Errorf("%s: unexpected error: %s", tag, err)
- t.FailNow()
- }
+ if err := w.Close(); err != nil {
+ t.Errorf("%s: unexpected error: %s", tag, err)
+ t.FailNow()
+ }
- buf := &testBuffer{bytes.NewBuffer(nil)}
- r.Reset(zbuf)
- if _, err := io.Copy(buf, r); err != nil {
- t.Errorf("%s: unexpected error: %s", tag, err)
- t.FailNow()
- }
+ buf := &testBuffer{bytes.NewBuffer(nil)}
+ r.Reset(zbuf)
+ if _, err := io.Copy(buf, r); err != nil {
+ t.Errorf("%s: unexpected error: %s", tag, err)
+ t.FailNow()
+ }
- if !bytes.Equal(buf.Bytes(), data) {
- t.Errorf("%s: decompress(compress(data)) != data (%d/%d)", tag, buf.Len(), len(data))
- t.FailNow()
+ if !bytes.Equal(buf.Bytes(), data) {
+ t.Errorf("%s: decompress(compress(data)) != data (%d/%d)", tag, buf.Len(), len(data))
+ t.FailNow()
+ }
}
- }
+ })
}
}
@@ -644,3 +673,26 @@ func writeReadChunked(t *testing.T, in []byte, chunkSize int) []byte {
}
return out
}
+
+func TestMultiBlockWrite(t *testing.T) {
+ f, err := os.Open("testdata/207326ba-36f8-11e7-954a-aca46ba8ca73.png")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer f.Close()
+
+ zbuf := bytes.NewBuffer(nil)
+ zw := lz4.NewWriter(zbuf)
+ if _, err := io.Copy(zw, f); err != nil {
+ t.Fatal(err)
+ }
+ if err := zw.Flush(); err != nil {
+ t.Fatal(err)
+ }
+
+ buf := bytes.NewBuffer(nil)
+ zr := lz4.NewReader(zbuf)
+ if _, err := io.Copy(buf, zr); err != nil {
+ t.Fatal(err)
+ }
+}
diff --git a/vendor/github.com/pierrec/lz4/writer.go b/vendor/github.com/pierrec/lz4/writer.go
index 11082f5a6..b1b712fe2 100644
--- a/vendor/github.com/pierrec/lz4/writer.go
+++ b/vendor/github.com/pierrec/lz4/writer.go
@@ -16,10 +16,8 @@ type Writer struct {
data []byte // data to be compressed, only used when dealing with block dependency as we need 64Kb to work with
window []byte // last 64KB of decompressed data (block dependency) + blockMaxSize buffer
- zbCompressBuf []byte // buffer for compressing lz4 blocks
- writeSizeBuf []byte // four-byte slice for writing checksums and sizes in writeblock
- hashTable []hashEntry
- currentGeneration uint
+ zbCompressBuf []byte // buffer for compressing lz4 blocks
+ writeSizeBuf []byte // four-byte slice for writing checksums and sizes in writeblock
}
// NewWriter returns a new LZ4 frame encoder.
@@ -33,7 +31,6 @@ func NewWriter(dst io.Writer) *Writer {
Header: Header{
BlockMaxSize: 4 << 20,
},
- hashTable: make([]hashEntry, hashTableSize),
writeSizeBuf: make([]byte, 4),
}
}
@@ -245,11 +242,7 @@ func (z *Writer) compressBlock(zb block) block {
if z.HighCompression {
n, err = CompressBlockHC(zb.data, zbuf, zb.offset)
} else {
- n, err = compressGenerationalBlock(zb.data, zbuf, zb.offset, z.currentGeneration, z.hashTable)
- z.currentGeneration++
- if z.currentGeneration == 0 { // wrapped around, reset table
- z.hashTable = make([]hashEntry, hashTableSize)
- }
+ n, err = CompressBlock(zb.data, zbuf, zb.offset)
}
// compressible and compressed size smaller than decompressed: ok!
@@ -257,6 +250,7 @@ func (z *Writer) compressBlock(zb block) block {
zb.compressed = true
zb.zdata = zbuf[:n]
} else {
+ zb.compressed = false
zb.zdata = zb.data[zb.offset:]
}
diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go
index 2df997ce1..b684a5b55 100644
--- a/vendor/github.com/prometheus/procfs/proc_limits.go
+++ b/vendor/github.com/prometheus/procfs/proc_limits.go
@@ -13,46 +13,46 @@ import (
// http://man7.org/linux/man-pages/man2/getrlimit.2.html.
type ProcLimits struct {
// CPU time limit in seconds.
- CPUTime int
+ CPUTime int64
// Maximum size of files that the process may create.
- FileSize int
+ FileSize int64
// Maximum size of the process's data segment (initialized data,
// uninitialized data, and heap).
- DataSize int
+ DataSize int64
// Maximum size of the process stack in bytes.
- StackSize int
+ StackSize int64
// Maximum size of a core file.
- CoreFileSize int
+ CoreFileSize int64
// Limit of the process's resident set in pages.
- ResidentSet int
+ ResidentSet int64
// Maximum number of processes that can be created for the real user ID of
// the calling process.
- Processes int
+ Processes int64
// Value one greater than the maximum file descriptor number that can be
// opened by this process.
- OpenFiles int
+ OpenFiles int64
// Maximum number of bytes of memory that may be locked into RAM.
- LockedMemory int
+ LockedMemory int64
// Maximum size of the process's virtual memory address space in bytes.
- AddressSpace int
+ AddressSpace int64
// Limit on the combined number of flock(2) locks and fcntl(2) leases that
// this process may establish.
- FileLocks int
+ FileLocks int64
// Limit of signals that may be queued for the real user ID of the calling
// process.
- PendingSignals int
+ PendingSignals int64
// Limit on the number of bytes that can be allocated for POSIX message
// queues for the real user ID of the calling process.
- MsqqueueSize int
+ MsqqueueSize int64
// Limit of the nice priority set using setpriority(2) or nice(2).
- NicePriority int
+ NicePriority int64
// Limit of the real-time priority set using sched_setscheduler(2) or
// sched_setparam(2).
- RealtimePriority int
+ RealtimePriority int64
// Limit (in microseconds) on the amount of CPU time that a process
// scheduled under a real-time scheduling policy may consume without making
// a blocking system call.
- RealtimeTimeout int
+ RealtimeTimeout int64
}
const (
@@ -125,13 +125,13 @@ func (p Proc) NewLimits() (ProcLimits, error) {
return l, s.Err()
}
-func parseInt(s string) (int, error) {
+func parseInt(s string) (int64, error) {
if s == limitsUnlimited {
return -1, nil
}
- i, err := strconv.ParseInt(s, 10, 32)
+ i, err := strconv.ParseInt(s, 10, 64)
if err != nil {
return 0, fmt.Errorf("couldn't parse value %s: %s", s, err)
}
- return int(i), nil
+ return i, nil
}
diff --git a/vendor/github.com/prometheus/procfs/proc_limits_test.go b/vendor/github.com/prometheus/procfs/proc_limits_test.go
index 70bf04ec2..ac62a3bb6 100644
--- a/vendor/github.com/prometheus/procfs/proc_limits_test.go
+++ b/vendor/github.com/prometheus/procfs/proc_limits_test.go
@@ -15,14 +15,14 @@ func TestNewLimits(t *testing.T) {
for _, test := range []struct {
name string
- want int
- have int
+ want int64
+ have int64
}{
{name: "cpu time", want: -1, have: l.CPUTime},
{name: "open files", want: 2048, have: l.OpenFiles},
{name: "msgqueue size", want: 819200, have: l.MsqqueueSize},
{name: "nice priority", want: 0, have: l.NicePriority},
- {name: "address space", want: -1, have: l.AddressSpace},
+ {name: "address space", want: 8589934592, have: l.AddressSpace},
} {
if test.want != test.have {
t.Errorf("want %s %d, have %d", test.name, test.want, test.have)