Mirror of https://github.com/hoernschen/dendrite.git, synced 2025-04-05 11:33:39 +00:00

Commit: Update sarama
Parent: 39bc80e17e
Commit hash: 43b35aaea4
74 changed files with 3707 additions and 530 deletions
vendor/manifest (vendored): 2 lines changed

@@ -428,7 +428,7 @@
 {
     "importpath": "gopkg.in/Shopify/sarama.v1",
     "repository": "https://gopkg.in/Shopify/sarama.v1",
-    "revision": "0fb560e5f7fbcaee2f75e3c34174320709f69944",
+    "revision": "3b1b38866a79f06deddf0487d5c27ba0697ccd65",
     "branch": "master"
 },
 {
vendor/src/gopkg.in/Shopify/sarama.v1/CHANGELOG.md (vendored): 111 lines changed

@@ -1,7 +1,118 @@
 # Changelog
+
+#### Version 1.15.0 (2017-12-08)
+
+New Features:
+- Claim official support for Kafka 1.0, though it did already work
+  ([#984](https://github.com/Shopify/sarama/pull/984)).
+- Helper methods for Kafka version numbers to/from strings
+  ([#989](https://github.com/Shopify/sarama/pull/989)).
+- Implement CreatePartitions request/response
+  ([#985](https://github.com/Shopify/sarama/pull/985)).
+
+Improvements:
+- Add error codes 45-60
+  ([#986](https://github.com/Shopify/sarama/issues/986)).
+
+Bug Fixes:
+- Fix slow consuming for certain Kafka 0.11/1.0 configurations
+  ([#982](https://github.com/Shopify/sarama/pull/982)).
+- Correctly determine when a FetchResponse contains the new message format
+  ([#990](https://github.com/Shopify/sarama/pull/990)).
+- Fix producing with multiple headers
+  ([#996](https://github.com/Shopify/sarama/pull/996)).
+- Fix handling of truncated record batches
+  ([#998](https://github.com/Shopify/sarama/pull/998)).
+- Fix leaking metrics when closing brokers
+  ([#991](https://github.com/Shopify/sarama/pull/991)).
+
+#### Version 1.14.0 (2017-11-13)
+
+New Features:
+- Add support for the new Kafka 0.11 record-batch format, including the wire
+  protocol and the necessary behavioural changes in the producer and consumer.
+  Transactions and idempotency are not yet supported, but producing and
+  consuming should work with all the existing bells and whistles (batching,
+  compression, etc) as well as the new custom headers. Thanks to Vlad Hanciuta
+  of Arista Networks for this work. Part of
+  ([#901](https://github.com/Shopify/sarama/issues/901)).
+
+Bug Fixes:
+- Fix encoding of ProduceResponse versions in test
+  ([#970](https://github.com/Shopify/sarama/pull/970)).
+- Return partial replicas list when we have it
+  ([#975](https://github.com/Shopify/sarama/pull/975)).
+
+#### Version 1.13.0 (2017-10-04)
+
+New Features:
+- Support for FetchRequest version 3
+  ([#905](https://github.com/Shopify/sarama/pull/905)).
+- Permit setting version on mock FetchResponses
+  ([#939](https://github.com/Shopify/sarama/pull/939)).
+- Add a configuration option to support storing only minimal metadata for
+  extremely large clusters
+  ([#937](https://github.com/Shopify/sarama/pull/937)).
+- Add `PartitionOffsetManager.ResetOffset` for backtracking tracked offsets
+  ([#932](https://github.com/Shopify/sarama/pull/932)).
+
+Improvements:
+- Provide the block-level timestamp when consuming compressed messages
+  ([#885](https://github.com/Shopify/sarama/issues/885)).
+- `Client.Replicas` and `Client.InSyncReplicas` now respect the order returned
+  by the broker, which can be meaningful
+  ([#930](https://github.com/Shopify/sarama/pull/930)).
+- Use a `Ticker` to reduce consumer timer overhead at the cost of higher
+  variance in the actual timeout
+  ([#933](https://github.com/Shopify/sarama/pull/933)).
+
+Bug Fixes:
+- Gracefully handle messages with negative timestamps
+  ([#907](https://github.com/Shopify/sarama/pull/907)).
+- Raise a proper error when encountering an unknown message version
+  ([#940](https://github.com/Shopify/sarama/pull/940)).
+
+#### Version 1.12.0 (2017-05-08)
+
+New Features:
+- Added support for the `ApiVersions` request and response pair, and Kafka
+  version 0.10.2 ([#867](https://github.com/Shopify/sarama/pull/867)). Note
+  that you still need to specify the Kafka version in the Sarama configuration
+  for the time being.
+- Added a `Brokers` method to the Client which returns the complete set of
+  active brokers ([#813](https://github.com/Shopify/sarama/pull/813)).
+- Added an `InSyncReplicas` method to the Client which returns the set of all
+  in-sync broker IDs for the given partition, now that the Kafka versions for
+  which this was misleading are no longer in our supported set
+  ([#872](https://github.com/Shopify/sarama/pull/872)).
+- Added a `NewCustomHashPartitioner` method which allows constructing a hash
+  partitioner with a custom hash method in case the default (FNV-1a) is not
+  suitable
+  ([#837](https://github.com/Shopify/sarama/pull/837),
+  [#841](https://github.com/Shopify/sarama/pull/841)).
+
+Improvements:
+- Recognize more Kafka error codes
+  ([#859](https://github.com/Shopify/sarama/pull/859)).
+
+Bug Fixes:
+- Fix an issue where decoding a malformed FetchRequest would not return the
+  correct error ([#818](https://github.com/Shopify/sarama/pull/818)).
+- Respect ordering of group protocols in JoinGroupRequests. This fix is
+  transparent if you're using the `AddGroupProtocol` or
+  `AddGroupProtocolMetadata` helpers; otherwise you will need to switch from
+  the `GroupProtocols` field (now deprecated) to use `OrderedGroupProtocols`
+  ([#812](https://github.com/Shopify/sarama/issues/812)).
+- Fix an alignment-related issue with atomics on 32-bit architectures
+  ([#859](https://github.com/Shopify/sarama/pull/859)).
 
 #### Version 1.11.0 (2016-12-20)
 
 _Important:_ As of Sarama 1.11 it is necessary to set the config value of
 `Producer.Return.Successes` to true in order to use the SyncProducer. Previous
 versions would silently override this value when instantiating a SyncProducer
 which led to unexpected values and data races.
 
 New Features:
 - Metrics! Thanks to Sébastien Launay for all his work on this feature
   ([#701](https://github.com/Shopify/sarama/pull/701),
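The 1.15.0 entry above mentions helper methods for converting Kafka version numbers to and from strings (#989). As a rough, non-authoritative sketch of how such a helper pairs with the existing Config.Version field (the function name ParseKafkaVersion is assumed from upstream sarama and is not shown in this diff):

package main

import (
	"log"

	sarama "gopkg.in/Shopify/sarama.v1"
)

func main() {
	// ParseKafkaVersion is assumed to be the helper from #989; it turns a
	// version string into the KafkaVersion value used for feature gating.
	version, err := sarama.ParseKafkaVersion("1.0.0")
	if err != nil {
		log.Fatalln("bad version string:", err)
	}

	cfg := sarama.NewConfig()
	cfg.Version = version // enable protocol features up to this broker version
	log.Printf("client configured for Kafka %v", version)
}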
vendor/src/gopkg.in/Shopify/sarama.v1/Makefile (vendored): 10 lines changed

@@ -1,7 +1,15 @@
 default: fmt vet errcheck test
 
+# Taken from https://github.com/codecov/example-go#caveat-multiple-files
 test:
-	go test -v -timeout 60s -race ./...
+	echo "" > coverage.txt
+	for d in `go list ./... | grep -v vendor`; do \
+	    go test -v -timeout 60s -race -coverprofile=profile.out -covermode=atomic $$d; \
+	    if [ -f profile.out ]; then \
+	        cat profile.out >> coverage.txt; \
+	        rm profile.out; \
+	    fi \
+	done
 
 vet:
 	go vet ./...
vendor/src/gopkg.in/Shopify/sarama.v1/README.md (vendored)

@@ -3,6 +3,7 @@ sarama
 
 [](https://godoc.org/github.com/Shopify/sarama)
 [](https://travis-ci.org/Shopify/sarama)
+[](https://codecov.io/gh/Shopify/sarama)
 
 Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/) version 0.8 (and later).
 
@@ -13,12 +14,14 @@ Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apa
 - The [examples](./examples) directory contains more elaborate example applications.
 - The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation.
 
+You might also want to look at the [Frequently Asked Questions](https://github.com/Shopify/sarama/wiki/Frequently-Asked-Questions).
+
 ### Compatibility and API stability
 
 Sarama provides a "2 releases + 2 months" compatibility guarantee: we support
 the two latest stable releases of Kafka and Go, and we provide a two month
 grace period for older releases. This means we currently officially support
-Go 1.7 and 1.6, and Kafka 0.10.0 and 0.9.0, although older releases are
+Go 1.9 through 1.7, and Kafka 1.0 through 0.10, although older releases are
 still likely to work.
 
 Sarama follows semantic versioning and provides API stability via the gopkg.in service.

@@ -27,7 +30,7 @@ A changelog is available [here](CHANGELOG.md).
 
 ### Contributing
 
-* Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/CONTRIBUTING.md).
+* Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/.github/CONTRIBUTING.md).
 * Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more
   technical and design details.
 * The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol)
vendor/src/gopkg.in/Shopify/sarama.v1/api_versions_response.go (vendored)

@@ -50,12 +50,13 @@ func (r *ApiVersionsResponse) encode(pe packetEncoder) error {
 }
 
 func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) error {
-	if kerr, err := pd.getInt16(); err != nil {
+	kerr, err := pd.getInt16()
+	if err != nil {
 		return err
-	} else {
-		r.Err = KError(kerr)
 	}
 
+	r.Err = KError(kerr)
+
 	numBlocks, err := pd.getArrayLength()
 	if err != nil {
 		return err
vendor/src/gopkg.in/Shopify/sarama.v1/async_producer.go (vendored)

@@ -1,6 +1,7 @@
 package sarama
 
 import (
+	"encoding/binary"
 	"fmt"
 	"sync"
 	"time"

@@ -17,24 +18,23 @@ import (
 // scope.
 type AsyncProducer interface {
 
-	// AsyncClose triggers a shutdown of the producer, flushing any messages it may
-	// have buffered. The shutdown has completed when both the Errors and Successes
-	// channels have been closed. When calling AsyncClose, you *must* continue to
-	// read from those channels in order to drain the results of any messages in
-	// flight.
+	// AsyncClose triggers a shutdown of the producer. The shutdown has completed
+	// when both the Errors and Successes channels have been closed. When calling
+	// AsyncClose, you *must* continue to read from those channels in order to
+	// drain the results of any messages in flight.
 	AsyncClose()
 
-	// Close shuts down the producer and flushes any messages it may have buffered.
-	// You must call this function before a producer object passes out of scope, as
-	// it may otherwise leak memory. You must call this before calling Close on the
-	// underlying client.
+	// Close shuts down the producer and waits for any buffered messages to be
+	// flushed. You must call this function before a producer object passes out of
+	// scope, as it may otherwise leak memory. You must call this before calling
+	// Close on the underlying client.
 	Close() error
 
 	// Input is the input channel for the user to write messages to that they
 	// wish to send.
 	Input() chan<- *ProducerMessage
 
-	// Successes is the success output channel back to the user when AckSuccesses is
+	// Successes is the success output channel back to the user when Return.Successes is
 	// enabled. If Return.Successes is true, you MUST read from this channel or the
 	// Producer will deadlock. It is suggested that you send and read messages
 	// together in a single select statement.
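The deadlock warning in the Successes comment above is the key contract of the async API. A minimal, non-authoritative sketch of the recommended single-select pattern (broker address and topic are placeholders):

package main

import (
	"log"

	sarama "gopkg.in/Shopify/sarama.v1"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Producer.Return.Successes = true // we will read the Successes channel

	producer, err := sarama.NewAsyncProducer([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatalln(err)
	}
	defer producer.Close()

	pending := 10
	sent, acked := 0, 0
	msg := &sarama.ProducerMessage{Topic: "example", Value: sarama.StringEncoder("hello")}

	// Send and read results in one select loop so neither channel backs up.
	input := producer.Input()
	for acked < pending {
		select {
		case input <- msg:
			sent++
			if sent == pending {
				input = nil // a nil channel blocks: stop queuing new messages
			}
		case s := <-producer.Successes():
			acked++
			log.Printf("stored at partition=%d offset=%d", s.Partition, s.Offset)
		case err := <-producer.Errors():
			acked++
			log.Println("failed to produce:", err.Err)
		}
	}
}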
@@ -120,6 +120,10 @@ type ProducerMessage struct {
 	// StringEncoder and ByteEncoder.
 	Value Encoder
 
+	// The headers are key-value pairs that are transparently passed
+	// by Kafka between producers and consumers.
+	Headers []RecordHeader
+
 	// This field is used to hold arbitrary data you wish to include so it
 	// will be available when receiving on the Successes and Errors channels.
 	// Sarama completely ignores this field and is only to be used for

@@ -147,8 +151,16 @@ type ProducerMessage struct {
 
 const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc.
 
-func (m *ProducerMessage) byteSize() int {
-	size := producerMessageOverhead
+func (m *ProducerMessage) byteSize(version int) int {
+	var size int
+	if version >= 2 {
+		size = maximumRecordOverhead
+		for _, h := range m.Headers {
+			size += len(h.Key) + len(h.Value) + 2*binary.MaxVarintLen32
+		}
+	} else {
+		size = producerMessageOverhead
+	}
 	if m.Key != nil {
 		size += m.Key.Length()
 	}

@@ -200,7 +212,7 @@ func (p *asyncProducer) Close() error {
 
 	if p.conf.Producer.Return.Successes {
 		go withRecover(func() {
-			for _ = range p.successes {
+			for range p.successes {
 			}
 		})
 	}

@@ -255,7 +267,11 @@ func (p *asyncProducer) dispatcher() {
 			p.inFlight.Add(1)
 		}
 
-		if msg.byteSize() > p.conf.Producer.MaxMessageBytes {
+		version := 1
+		if p.conf.Version.IsAtLeast(V0_11_0_0) {
+			version = 2
+		}
+		if msg.byteSize(version) > p.conf.Producer.MaxMessageBytes {
 			p.returnError(msg, ErrMessageSizeTooLarge)
 			continue
 		}
vendor/src/gopkg.in/Shopify/sarama.v1/async_producer_test.go (vendored)

@@ -18,7 +18,7 @@ func closeProducer(t *testing.T, p AsyncProducer) {
 
 	wg.Add(2)
 	go func() {
-		for _ = range p.Successes() {
+		for range p.Successes() {
 			t.Error("Unexpected message on Successes()")
 		}
 		wg.Done()

@@ -808,7 +808,7 @@ func ExampleAsyncProducer_goroutines() {
 	wg.Add(1)
 	go func() {
 		defer wg.Done()
-		for _ = range producer.Successes() {
+		for range producer.Successes() {
 			successes++
 		}
 	}()
vendor/src/gopkg.in/Shopify/sarama.v1/broker.go (vendored): 20 lines changed

@@ -52,7 +52,7 @@ type responsePromise struct {
 	errors chan error
 }
 
-// NewBroker creates and returns a Broker targetting the given host:port address.
+// NewBroker creates and returns a Broker targeting the given host:port address.
 // This does not attempt to actually connect, you have to call Open() for that.
 func NewBroker(addr string) *Broker {
 	return &Broker{id: -1, addr: addr}

@@ -178,6 +178,13 @@ func (b *Broker) Close() error {
 	b.done = nil
 	b.responses = nil
 
+	if b.id >= 0 {
+		b.conf.MetricRegistry.Unregister(getMetricNameForBroker("incoming-byte-rate", b))
+		b.conf.MetricRegistry.Unregister(getMetricNameForBroker("request-rate", b))
+		b.conf.MetricRegistry.Unregister(getMetricNameForBroker("outgoing-byte-rate", b))
+		b.conf.MetricRegistry.Unregister(getMetricNameForBroker("response-rate", b))
+	}
+
 	if err == nil {
 		Logger.Printf("Closed connection to broker %s\n", b.addr)
 	} else {

@@ -355,6 +362,17 @@ func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroups
 	return response, nil
 }
 
+func (b *Broker) ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse, error) {
+	response := new(ApiVersionsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
 func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, error) {
 	b.lock.Lock()
 	defer b.lock.Unlock()
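The new Broker.ApiVersions method above exposes the ApiVersions RPC described in the 1.12.0 changelog entry. A rough usage sketch; the broker address is a placeholder and the response is only printed, since the diff's own test checks no more than a non-nil response:

package main

import (
	"log"

	sarama "gopkg.in/Shopify/sarama.v1"
)

func main() {
	broker := sarama.NewBroker("localhost:9092") // placeholder address
	if err := broker.Open(sarama.NewConfig()); err != nil {
		log.Fatalln(err)
	}
	defer broker.Close()

	// Ask the broker which API keys and version ranges it supports.
	resp, err := broker.ApiVersions(&sarama.ApiVersionsRequest{})
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("broker answered ApiVersions: %+v", resp)
}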
vendor/src/gopkg.in/Shopify/sarama.v1/broker_test.go (vendored)

@@ -77,10 +77,6 @@ func TestSimpleBrokerCommunication(t *testing.T) {
 			t.Fatal(err)
 		}
 		tt.runner(t, broker)
-		err = broker.Close()
-		if err != nil {
-			t.Error(err)
-		}
 		// Wait up to 500 ms for the remote broker to process the request and
 		// notify us about the metrics
 		timeout := 500 * time.Millisecond

@@ -91,6 +87,10 @@ func TestSimpleBrokerCommunication(t *testing.T) {
 			t.Errorf("No request received for: %s after waiting for %v", tt.name, timeout)
 		}
 		mb.Close()
+		err = broker.Close()
+		if err != nil {
+			t.Error(err)
+		}
 	}
 
 }

@@ -284,6 +284,19 @@ var brokerTestTable = []struct {
 			t.Error("DescribeGroups request got no response!")
 		}
 	}},
+
+	{"ApiVersionsRequest",
+		[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+		func(t *testing.T, broker *Broker) {
+			request := ApiVersionsRequest{}
+			response, err := broker.ApiVersions(&request)
+			if err != nil {
+				t.Error(err)
+			}
+			if response == nil {
+				t.Error("ApiVersions request got no response!")
+			}
+		}},
 }
 
 func validateBrokerMetrics(t *testing.T, broker *Broker, mockBrokerMetrics brokerMetrics) {
vendor/src/gopkg.in/Shopify/sarama.v1/client.go (vendored): 78 lines changed

@@ -17,6 +17,9 @@ type Client interface {
 	// altered after it has been created.
 	Config() *Config
 
+	// Brokers returns the current set of active brokers as retrieved from cluster metadata.
+	Brokers() []*Broker
+
 	// Topics returns the set of available topics as retrieved from cluster metadata.
 	Topics() ([]string, error)
 

@@ -35,15 +38,20 @@ type Client interface {
 	// Replicas returns the set of all replica IDs for the given partition.
 	Replicas(topic string, partitionID int32) ([]int32, error)
 
+	// InSyncReplicas returns the set of all in-sync replica IDs for the given
+	// partition. In-sync replicas are replicas which are fully caught up with
+	// the partition leader.
+	InSyncReplicas(topic string, partitionID int32) ([]int32, error)
+
 	// RefreshMetadata takes a list of topics and queries the cluster to refresh the
 	// available metadata for those topics. If no topics are provided, it will refresh
 	// metadata for all topics.
 	RefreshMetadata(topics ...string) error
 
 	// GetOffset queries the cluster to get the most recent available offset at the
-	// given time on the topic/partition combination. Time should be OffsetOldest for
-	// the earliest available offset, OffsetNewest for the offset of the message that
-	// will be produced next, or a time.
+	// given time (in milliseconds) on the topic/partition combination.
+	// Time should be OffsetOldest for the earliest available offset,
+	// OffsetNewest for the offset of the message that will be produced next, or a time.
 	GetOffset(topic string, partitionID int32, time int64) (int64, error)
 
 	// Coordinator returns the coordinating broker for a consumer group. It will
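A brief, hedged sketch of the new interface surface above (address and topic are placeholders): querying the newest offset and the in-sync replica set for a partition.

package main

import (
	"log"

	sarama "gopkg.in/Shopify/sarama.v1"
)

func main() {
	client, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
	if err != nil {
		log.Fatalln(err)
	}
	defer client.Close()

	// Offset of the message that will be produced next on example/0.
	next, err := client.GetOffset("example", 0, sarama.OffsetNewest)
	if err != nil {
		log.Fatalln(err)
	}

	// Replica IDs that are fully caught up with the partition leader,
	// in the order the broker reported them (no longer sorted).
	isr, err := client.InSyncReplicas("example", 0)
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("next offset=%d, ISR=%v", next, isr)
}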
@@ -133,7 +141,8 @@ func NewClient(addrs []string, conf *Config) (Client, error) {
 		client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index]))
 	}
 
-	// do an initial fetch of all cluster metadata by specifing an empty list of topics
+	if conf.Metadata.Full {
+		// do an initial fetch of all cluster metadata by specifying an empty list of topics
 	err := client.RefreshMetadata()
 	switch err {
 	case nil:

@@ -146,6 +155,7 @@ func NewClient(addrs []string, conf *Config) (Client, error) {
 		_ = client.Close()
 		return nil, err
 	}
+	}
 	go withRecover(client.backgroundMetadataUpdater)
 
 	Logger.Println("Successfully initialized new client")

@@ -157,6 +167,16 @@ func (client *client) Config() *Config {
 	return client.conf
 }
 
+func (client *client) Brokers() []*Broker {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+	brokers := make([]*Broker, 0)
+	for _, broker := range client.brokers {
+		brokers = append(brokers, broker)
+	}
+	return brokers
+}
+
 func (client *client) Close() error {
 	if client.Closed() {
 		// Chances are this is being called from a defer() and the error will go unobserved
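The Brokers accessor added above returns a copied slice of the active broker set, so callers can iterate without holding the client's lock. A short, hedged sketch (placeholder address), e.g. for instrumentation:

package main

import (
	"log"

	sarama "gopkg.in/Shopify/sarama.v1"
)

func main() {
	client, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
	if err != nil {
		log.Fatalln(err)
	}
	defer client.Close()

	// Brokers returns a snapshot copy, safe to range over.
	for _, b := range client.Brokers() {
		log.Printf("broker id=%d addr=%s", b.ID(), b.Addr())
	}
}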
@@ -277,9 +297,34 @@ func (client *client) Replicas(topic string, partitionID int32) ([]int32, error)
 	}
 
 	if metadata.Err == ErrReplicaNotAvailable {
-		return nil, metadata.Err
+		return dupInt32Slice(metadata.Replicas), metadata.Err
 	}
-	return dupeAndSort(metadata.Replicas), nil
+	return dupInt32Slice(metadata.Replicas), nil
 }
 
+func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	metadata := client.cachedMetadata(topic, partitionID)
+
+	if metadata == nil {
+		err := client.RefreshMetadata(topic)
+		if err != nil {
+			return nil, err
+		}
+		metadata = client.cachedMetadata(topic, partitionID)
+	}
+
+	if metadata == nil {
+		return nil, ErrUnknownTopicOrPartition
+	}
+
+	if metadata.Err == ErrReplicaNotAvailable {
+		return dupInt32Slice(metadata.Isr), metadata.Err
+	}
+	return dupInt32Slice(metadata.Isr), nil
+}
+
 func (client *client) Leader(topic string, partitionID int32) (*Broker, error) {

@@ -562,7 +607,20 @@ func (client *client) backgroundMetadataUpdater() {
 	for {
 		select {
 		case <-ticker.C:
-			if err := client.RefreshMetadata(); err != nil {
+			topics := []string{}
+			if !client.conf.Metadata.Full {
+				if specificTopics, err := client.Topics(); err != nil {
+					Logger.Println("Client background metadata topic load:", err)
+					break
+				} else if len(specificTopics) == 0 {
+					Logger.Println("Client background metadata update: no specific topics to update")
+					break
+				} else {
+					topics = specificTopics
+				}
+			}
+
+			if err := client.RefreshMetadata(topics...); err != nil {
 				Logger.Println("Client background metadata update:", err)
 			}
 		case <-client.closer:

@@ -592,12 +650,12 @@ func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int)
 		switch err.(type) {
 		case nil:
 			// valid response, use it
-			if shouldRetry, err := client.updateMetadata(response); shouldRetry {
+			shouldRetry, err := client.updateMetadata(response)
+			if shouldRetry {
 				Logger.Println("client/metadata found some partitions to be leaderless")
 				return retry(err) // note: err can be nil
-			} else {
-				return err
 			}
+			return err
 
 		case PacketEncodingError:
 			// didn't even send, return the error
vendor/src/gopkg.in/Shopify/sarama.v1/client_test.go (vendored)

@@ -188,12 +188,23 @@ func TestClientMetadata(t *testing.T) {
 	replicas, err = client.Replicas("my_topic", 0)
 	if err != nil {
 		t.Error(err)
-	} else if replicas[0] != 1 {
-		t.Error("Incorrect (or unsorted) replica")
-	} else if replicas[1] != 3 {
-		t.Error("Incorrect (or unsorted) replica")
+	} else if replicas[0] != 3 {
+		t.Error("Incorrect (or sorted) replica")
+	} else if replicas[1] != 1 {
+		t.Error("Incorrect (or sorted) replica")
 	} else if replicas[2] != 5 {
-		t.Error("Incorrect (or unsorted) replica")
+		t.Error("Incorrect (or sorted) replica")
 	}
 
+	isr, err = client.InSyncReplicas("my_topic", 0)
+	if err != nil {
+		t.Error(err)
+	} else if len(isr) != 2 {
+		t.Error("Client returned incorrect ISRs for partition:", isr)
+	} else if isr[0] != 5 {
+		t.Error("Incorrect (or sorted) ISR:", isr)
+	} else if isr[1] != 1 {
+		t.Error("Incorrect (or sorted) ISR:", isr)
+	}
+
 	leader.Close()
vendor/src/gopkg.in/Shopify/sarama.v1/config.go (vendored): 37 lines changed

@@ -72,6 +72,12 @@ type Config struct {
 		// Defaults to 10 minutes. Set to 0 to disable. Similar to
 		// `topic.metadata.refresh.interval.ms` in the JVM version.
 		RefreshFrequency time.Duration
+
+		// Whether to maintain a full set of metadata for all topics, or just
+		// the minimal set that has been necessary so far. The full set is simpler
+		// and usually more convenient, but can take up a substantial amount of
+		// memory if you have many topics and partitions. Defaults to true.
+		Full bool
 	}
 
 	// Producer is the namespace for configuration related to producing messages,
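For very large clusters, the new Metadata.Full switch above trades convenience for memory, as the 1.13.0 changelog entry (#937) notes. A minimal sketch of opting out of full-metadata mode (broker address is a placeholder):

package main

import (
	"log"

	sarama "gopkg.in/Shopify/sarama.v1"
)

func main() {
	cfg := sarama.NewConfig()
	// Track metadata only for topics actually used instead of the whole
	// cluster; the background refresher then updates just those topics.
	cfg.Metadata.Full = false

	client, err := sarama.NewClient([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatalln(err)
	}
	defer client.Close()
}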
@@ -99,7 +105,10 @@ type Config struct {
 		Partitioner PartitionerConstructor
 
 		// Return specifies what channels will be populated. If they are set to true,
-		// you must read from the respective channels to prevent deadlock.
+		// you must read from the respective channels to prevent deadlock. If,
+		// however, this config is used to create a `SyncProducer`, both must be set
+		// to true and you shall not read from the channels since the producer does
+		// this internally.
 		Return struct {
 			// If enabled, successfully delivered messages will be returned on the
 			// Successes channel (default disabled).

@@ -187,11 +196,23 @@ type Config struct {
 		// Equivalent to the JVM's `fetch.wait.max.ms`.
 		MaxWaitTime time.Duration
 
-		// The maximum amount of time the consumer expects a message takes to process
-		// for the user. If writing to the Messages channel takes longer than this,
-		// that partition will stop fetching more messages until it can proceed again.
+		// The maximum amount of time the consumer expects a message takes to
+		// process for the user. If writing to the Messages channel takes longer
+		// than this, that partition will stop fetching more messages until it
+		// can proceed again.
 		// Note that, since the Messages channel is buffered, the actual grace time is
 		// (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms.
+		// If a message is not written to the Messages channel between two ticks
+		// of the expiryTicker then a timeout is detected.
+		// Using a ticker instead of a timer to detect timeouts should typically
+		// result in many fewer calls to Timer functions which may result in a
+		// significant performance improvement if many messages are being sent
+		// and timeouts are infrequent.
+		// The disadvantage of using a ticker instead of a timer is that
+		// timeouts will be less accurate. That is, the effective timeout could
+		// be between `MaxProcessingTime` and `2 * MaxProcessingTime`. For
+		// example, if `MaxProcessingTime` is 100ms then a delay of 180ms
+		// between two messages being sent may not be recognized as a timeout.
 		MaxProcessingTime time.Duration
 
 		// Return specifies what channels will be populated. If they are set to true,
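The comment block above describes the ticker-based timeout detection introduced by #933. A generic, non-authoritative illustration of the trade-off (not sarama's actual internals): with a ticker, a stall is only declared if no progress happened between two consecutive ticks, so the effective timeout lies between one and two tick periods.

package main

import (
	"fmt"
	"time"
)

func main() {
	const maxProcessingTime = 100 * time.Millisecond
	work := make(chan int)

	go func() { // a producer that stalls after two items
		work <- 1
		work <- 2
		time.Sleep(500 * time.Millisecond)
		close(work)
	}()

	ticker := time.NewTicker(maxProcessingTime)
	defer ticker.Stop()

	progressed := false
	for {
		select {
		case v, ok := <-work:
			if !ok {
				return
			}
			progressed = true
			fmt.Println("got", v)
		case <-ticker.C:
			if !progressed {
				// No item arrived in the last full tick interval, i.e. between
				// maxProcessingTime and 2*maxProcessingTime of real delay.
				fmt.Println("timeout detected")
				return
			}
			progressed = false // require fresh progress before the next tick
		}
	}
}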
@@ -260,6 +281,7 @@ func NewConfig() *Config {
 	c.Metadata.Retry.Max = 3
 	c.Metadata.Retry.Backoff = 250 * time.Millisecond
 	c.Metadata.RefreshFrequency = 10 * time.Minute
+	c.Metadata.Full = true
 
 	c.Producer.MaxMessageBytes = 1000000
 	c.Producer.RequiredAcks = WaitForLocal

@@ -305,10 +327,13 @@ func (c *Config) Validate() error {
 		Logger.Println("Producer.RequiredAcks > 1 is deprecated and will raise an exception with kafka >= 0.8.2.0.")
 	}
 	if c.Producer.MaxMessageBytes >= int(MaxRequestSize) {
-		Logger.Println("Producer.MaxMessageBytes is larger than MaxRequestSize; it will be ignored.")
+		Logger.Println("Producer.MaxMessageBytes must be smaller than MaxRequestSize; it will be ignored.")
 	}
 	if c.Producer.Flush.Bytes >= int(MaxRequestSize) {
-		Logger.Println("Producer.Flush.Bytes is larger than MaxRequestSize; it will be ignored.")
+		Logger.Println("Producer.Flush.Bytes must be smaller than MaxRequestSize; it will be ignored.")
 	}
+	if (c.Producer.Flush.Bytes > 0 || c.Producer.Flush.Messages > 0) && c.Producer.Flush.Frequency == 0 {
+		Logger.Println("Producer.Flush: Bytes or Messages are set, but Frequency is not; messages may not get flushed.")
+	}
 	if c.Producer.Timeout%time.Millisecond != 0 {
 		Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.")
vendor/src/gopkg.in/Shopify/sarama.v1/config_test.go (vendored): 163 lines changed

@@ -33,6 +33,169 @@ func TestEmptyClientIDConfigValidates(t *testing.T) {
 	}
 }
 
+func TestNetConfigValidates(t *testing.T) {
+	tests := []struct {
+		name string
+		cfg  func(*Config) // resorting to using a function as a param because of internal composite structs
+		err  string
+	}{
+		{
+			"OpenRequests",
+			func(cfg *Config) {
+				cfg.Net.MaxOpenRequests = 0
+			},
+			"Net.MaxOpenRequests must be > 0"},
+		{"DialTimeout",
+			func(cfg *Config) {
+				cfg.Net.DialTimeout = 0
+			},
+			"Net.DialTimeout must be > 0"},
+		{"ReadTimeout",
+			func(cfg *Config) {
+				cfg.Net.ReadTimeout = 0
+			},
+			"Net.ReadTimeout must be > 0"},
+		{"WriteTimeout",
+			func(cfg *Config) {
+				cfg.Net.WriteTimeout = 0
+			},
+			"Net.WriteTimeout must be > 0"},
+		{"KeepAlive",
+			func(cfg *Config) {
+				cfg.Net.KeepAlive = -1
+			},
+			"Net.KeepAlive must be >= 0"},
+		{"SASL.User",
+			func(cfg *Config) {
+				cfg.Net.SASL.Enable = true
+				cfg.Net.SASL.User = ""
+			},
+			"Net.SASL.User must not be empty when SASL is enabled"},
+		{"SASL.Password",
+			func(cfg *Config) {
+				cfg.Net.SASL.Enable = true
+				cfg.Net.SASL.User = "user"
+				cfg.Net.SASL.Password = ""
+			},
+			"Net.SASL.Password must not be empty when SASL is enabled"},
+	}
+
+	for i, test := range tests {
+		c := NewConfig()
+		test.cfg(c)
+		if err := c.Validate(); string(err.(ConfigurationError)) != test.err {
+			t.Errorf("[%d]:[%s] Expected %s, Got %s\n", i, test.name, test.err, err)
+		}
+	}
+}
+
+func TestMetadataConfigValidates(t *testing.T) {
+	tests := []struct {
+		name string
+		cfg  func(*Config) // resorting to using a function as a param because of internal composite structs
+		err  string
+	}{
+		{
+			"Retry.Max",
+			func(cfg *Config) {
+				cfg.Metadata.Retry.Max = -1
+			},
+			"Metadata.Retry.Max must be >= 0"},
+		{"Retry.Backoff",
+			func(cfg *Config) {
+				cfg.Metadata.Retry.Backoff = -1
+			},
+			"Metadata.Retry.Backoff must be >= 0"},
+		{"RefreshFrequency",
+			func(cfg *Config) {
+				cfg.Metadata.RefreshFrequency = -1
+			},
+			"Metadata.RefreshFrequency must be >= 0"},
+	}
+
+	for i, test := range tests {
+		c := NewConfig()
+		test.cfg(c)
+		if err := c.Validate(); string(err.(ConfigurationError)) != test.err {
+			t.Errorf("[%d]:[%s] Expected %s, Got %s\n", i, test.name, test.err, err)
+		}
+	}
+}
+
+func TestProducerConfigValidates(t *testing.T) {
+	tests := []struct {
+		name string
+		cfg  func(*Config) // resorting to using a function as a param because of internal composite structs
+		err  string
+	}{
+		{
+			"MaxMessageBytes",
+			func(cfg *Config) {
+				cfg.Producer.MaxMessageBytes = 0
+			},
+			"Producer.MaxMessageBytes must be > 0"},
+		{"RequiredAcks",
+			func(cfg *Config) {
+				cfg.Producer.RequiredAcks = -2
+			},
+			"Producer.RequiredAcks must be >= -1"},
+		{"Timeout",
+			func(cfg *Config) {
+				cfg.Producer.Timeout = 0
+			},
+			"Producer.Timeout must be > 0"},
+		{"Partitioner",
+			func(cfg *Config) {
+				cfg.Producer.Partitioner = nil
+			},
+			"Producer.Partitioner must not be nil"},
+		{"Flush.Bytes",
+			func(cfg *Config) {
+				cfg.Producer.Flush.Bytes = -1
+			},
+			"Producer.Flush.Bytes must be >= 0"},
+		{"Flush.Messages",
+			func(cfg *Config) {
+				cfg.Producer.Flush.Messages = -1
+			},
+			"Producer.Flush.Messages must be >= 0"},
+		{"Flush.Frequency",
+			func(cfg *Config) {
+				cfg.Producer.Flush.Frequency = -1
+			},
+			"Producer.Flush.Frequency must be >= 0"},
+		{"Flush.MaxMessages",
+			func(cfg *Config) {
+				cfg.Producer.Flush.MaxMessages = -1
+			},
+			"Producer.Flush.MaxMessages must be >= 0"},
+		{"Flush.MaxMessages with Producer.Flush.Messages",
+			func(cfg *Config) {
+				cfg.Producer.Flush.MaxMessages = 1
+				cfg.Producer.Flush.Messages = 2
+			},
+			"Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set"},
+		{"Flush.Retry.Max",
+			func(cfg *Config) {
+				cfg.Producer.Retry.Max = -1
+			},
+			"Producer.Retry.Max must be >= 0"},
+		{"Flush.Retry.Backoff",
+			func(cfg *Config) {
+				cfg.Producer.Retry.Backoff = -1
+			},
+			"Producer.Retry.Backoff must be >= 0"},
+	}
+
+	for i, test := range tests {
+		c := NewConfig()
+		test.cfg(c)
+		if err := c.Validate(); string(err.(ConfigurationError)) != test.err {
+			t.Errorf("[%d]:[%s] Expected %s, Got %s\n", i, test.name, test.err, err)
+		}
+	}
+}
+
 func TestLZ4ConfigValidation(t *testing.T) {
 	config := NewConfig()
 	config.Producer.Compression = CompressionLZ4
vendor/src/gopkg.in/Shopify/sarama.v1/consumer.go (vendored): 201 lines changed

@@ -14,7 +14,9 @@ type ConsumerMessage struct {
 	Topic     string
 	Partition int32
 	Offset    int64
-	Timestamp time.Time // only set if kafka is version 0.10+
+	Timestamp      time.Time       // only set if kafka is version 0.10+, inner message timestamp
+	BlockTimestamp time.Time       // only set if kafka is version 0.10+, outer (compressed) block timestamp
+	Headers        []*RecordHeader // only set if kafka is version 0.11+
 }
 
 // ConsumerError is what is provided to the user when an error occurs.
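With the fields added above, a consumer against Kafka 0.11+ can observe per-record headers and both timestamps. A hedged sketch (broker address and topic are placeholders; RecordHeader Key/Value are byte slices):

package main

import (
	"log"

	sarama "gopkg.in/Shopify/sarama.v1"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V0_11_0_0 // Headers are only populated on 0.11+

	master, err := sarama.NewConsumer([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatalln(err)
	}
	defer master.Close()

	pc, err := master.ConsumePartition("example", 0, sarama.OffsetOldest)
	if err != nil {
		log.Fatalln(err)
	}
	defer pc.Close()

	for msg := range pc.Messages() {
		log.Printf("offset=%d ts=%s blockTs=%s", msg.Offset, msg.Timestamp, msg.BlockTimestamp)
		for _, h := range msg.Headers {
			log.Printf("header %s=%s", h.Key, h.Value)
		}
	}
}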
@@ -246,9 +248,9 @@ func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) {
 
 // PartitionConsumer
 
-// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call Close()
-// or AsyncClose() on a PartitionConsumer to avoid leaks, it will not be garbage-collected automatically
-// when it passes out of scope.
+// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call one of Close() or
+// AsyncClose() on a PartitionConsumer to avoid leaks; it will not be garbage-collected automatically when it passes out
+// of scope.
 //
 // The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range
 // loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported

@@ -257,19 +259,25 @@ func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) {
 // By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set
 // your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement
 // or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches.
+//
+// To terminate such a for/range loop while the loop is executing, call AsyncClose. This will kick off the process of
+// consumer tear-down & return immediately. Continue to loop, servicing the Messages channel until the teardown process
+// AsyncClose initiated closes it (thus terminating the for/range loop). If you've already ceased reading Messages, call
+// Close; this will signal the PartitionConsumer's goroutines to begin shutting down (just like AsyncClose), but will
+// also drain the Messages channel, harvest all errors & return them once cleanup has completed.
 type PartitionConsumer interface {
 
-	// AsyncClose initiates a shutdown of the PartitionConsumer. This method will
-	// return immediately, after which you should wait until the 'messages' and
-	// 'errors' channel are drained. It is required to call this function, or
-	// Close before a consumer object passes out of scope, as it will otherwise
-	// leak memory. You must call this before calling Close on the underlying client.
+	// AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately, after which you
+	// should continue to service the 'Messages' and 'Errors' channels until they are empty. It is required to call this
+	// function, or Close before a consumer object passes out of scope, as it will otherwise leak memory. You must call
+	// this before calling Close on the underlying client.
 	AsyncClose()
 
-	// Close stops the PartitionConsumer from fetching messages. It is required to
-	// call this function (or AsyncClose) before a consumer object passes out of
-	// scope, as it will otherwise leak memory. You must call this before calling
-	// Close on the underlying client.
+	// Close stops the PartitionConsumer from fetching messages. It will initiate a shutdown just like AsyncClose, drain
+	// the Messages channel, harvest any errors & return them to the caller. Note that if you are continuing to service
+	// the Messages channel when this function is called, you will be competing with Close for messages; consider
+	// calling AsyncClose, instead. It is required to call this function (or AsyncClose) before a consumer object passes
+	// out of scope, as it will otherwise leak memory. You must call this before calling Close on the underlying client.
 	Close() error
 
 	// Messages returns the read channel for the messages that are returned by
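A hedged sketch of the AsyncClose protocol described above. The names shutdownSignal and process are hypothetical placeholders, pc is assumed to be an open PartitionConsumer (as in the earlier consumer example), and Consumer.Return.Errors is assumed enabled so the Errors channel carries errors:

// Sketch only: shutdownSignal (a channel) and process() are hypothetical.
go func() {
	<-shutdownSignal
	pc.AsyncClose() // returns immediately; teardown begins in the background
}()

// Keep servicing Messages until teardown closes the channel, which also
// terminates this for/range loop.
for msg := range pc.Messages() {
	process(msg)
}
// Harvest any errors that were emitted while shutting down.
for err := range pc.Errors() {
	log.Println(err)
}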
@@ -289,6 +297,7 @@ type PartitionConsumer interface {
 }
 
 type partitionConsumer struct {
+	highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG
 	consumer *consumer
 	conf     *Config
 	topic    string

@@ -304,7 +313,6 @@ type partitionConsumer struct {
 
 	fetchSize int32
 	offset    int64
-	highWaterMarkOffset int64
 }
 
 var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing

@@ -324,7 +332,7 @@ func (child *partitionConsumer) sendError(err error) {
 }
 
 func (child *partitionConsumer) dispatcher() {
-	for _ = range child.trigger {
+	for range child.trigger {
 		select {
 		case <-child.dying:
 			close(child.trigger)

@@ -411,7 +419,7 @@ func (child *partitionConsumer) Close() error {
 	child.AsyncClose()
 
 	go withRecover(func() {
-		for _ = range child.messages {
+		for range child.messages {
 			// drain
 		}
 	})

@@ -433,25 +441,20 @@ func (child *partitionConsumer) HighWaterMarkOffset() int64 {
 
 func (child *partitionConsumer) responseFeeder() {
 	var msgs []*ConsumerMessage
-	expiryTimer := time.NewTimer(child.conf.Consumer.MaxProcessingTime)
-	expireTimedOut := false
+	msgSent := false
 
 feederLoop:
 	for response := range child.feeder {
 		msgs, child.responseResult = child.parseResponse(response)
+		expiryTicker := time.NewTicker(child.conf.Consumer.MaxProcessingTime)
 
 		for i, msg := range msgs {
-			if !expiryTimer.Stop() && !expireTimedOut {
-				// expiryTimer was expired; clear out the waiting msg
-				<-expiryTimer.C
-			}
-			expiryTimer.Reset(child.conf.Consumer.MaxProcessingTime)
-			expireTimedOut = false
-
+		messageSelect:
 			select {
 			case child.messages <- msg:
-			case <-expiryTimer.C:
-				expireTimedOut = true
+				msgSent = true
+			case <-expiryTicker.C:
+				if !msgSent {
 				child.responseResult = errTimedOut
 				child.broker.acks.Done()
 				for _, msg = range msgs[i:] {

@@ -459,9 +462,16 @@ feederLoop:
 				}
 				child.broker.input <- child
 				continue feederLoop
+				} else {
+					// current message has not been sent, return to select
+					// statement
+					msgSent = false
+					goto messageSelect
+				}
 			}
 		}
 
+		expiryTicker.Stop()
 		child.broker.acks.Done()
 	}
 

@@ -469,44 +479,12 @@ feederLoop:
 	close(child.errors)
 }
 
-func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) {
-	block := response.GetBlock(child.topic, child.partition)
-	if block == nil {
-		return nil, ErrIncompleteResponse
-	}
-
-	if block.Err != ErrNoError {
-		return nil, block.Err
-	}
-
-	if len(block.MsgSet.Messages) == 0 {
-		// We got no messages. If we got a trailing one then we need to ask for more data.
-		// Otherwise we just poll again and wait for one to be produced...
-		if block.MsgSet.PartialTrailingMessage {
-			if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max {
-				// we can't ask for more data, we've hit the configured limit
-				child.sendError(ErrMessageTooLarge)
-				child.offset++ // skip this one so we can keep processing future messages
-			} else {
-				child.fetchSize *= 2
-				if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max {
-					child.fetchSize = child.conf.Consumer.Fetch.Max
-				}
-			}
-		}
-
-		return nil, nil
-	}
-
-	// we got messages, reset our fetch size in case it was increased for a previous request
-	child.fetchSize = child.conf.Consumer.Fetch.Default
-	atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset)
-
-	incomplete := false
-	prelude := true
+func (child *partitionConsumer) parseMessages(msgSet *MessageSet) ([]*ConsumerMessage, error) {
 	var messages []*ConsumerMessage
-	for _, msgBlock := range block.MsgSet.Messages {
+	var incomplete bool
+	prelude := true
+
+	for _, msgBlock := range msgSet.Messages {
 		for _, msg := range msgBlock.Messages() {
 			offset := msg.Offset
 			if msg.Msg.Version >= 1 {

@@ -526,6 +504,42 @@ func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*Consu
 					Value:     msg.Msg.Value,
 					Offset:    offset,
 					Timestamp: msg.Msg.Timestamp,
+					BlockTimestamp: msgBlock.Msg.Timestamp,
 				})
 				child.offset = offset + 1
 			} else {
 				incomplete = true
 			}
 		}
 	}
 
+	if incomplete || len(messages) == 0 {
+		return nil, ErrIncompleteResponse
+	}
+	return messages, nil
+}
+
+func (child *partitionConsumer) parseRecords(batch *RecordBatch) ([]*ConsumerMessage, error) {
+	var messages []*ConsumerMessage
+	var incomplete bool
+	prelude := true
+
+	for _, rec := range batch.Records {
+		offset := batch.FirstOffset + rec.OffsetDelta
+		if prelude && offset < child.offset {
+			continue
+		}
+		prelude = false
+
+		if offset >= child.offset {
+			messages = append(messages, &ConsumerMessage{
+				Topic:     child.topic,
+				Partition: child.partition,
+				Key:       rec.Key,
+				Value:     rec.Value,
+				Offset:    offset,
+				Timestamp: batch.FirstTimestamp.Add(rec.TimestampDelta),
+				Headers:   rec.Headers,
+			})
+			child.offset = offset + 1
+		} else {

@@ -533,14 +547,63 @@ func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*Consu
 		}
 	}
 
+	if incomplete || len(messages) == 0 {
+		return nil, ErrIncompleteResponse
+	}
+	return messages, nil
+}
+
+func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) {
+	block := response.GetBlock(child.topic, child.partition)
+	if block == nil {
+		return nil, ErrIncompleteResponse
+	}
+
+	if block.Err != ErrNoError {
+		return nil, block.Err
+	}
+
+	nRecs, err := block.Records.numRecords()
+	if err != nil {
+		return nil, err
+	}
+	if nRecs == 0 {
+		partialTrailingMessage, err := block.Records.isPartial()
+		if err != nil {
+			return nil, err
+		}
+		// We got no messages. If we got a trailing one then we need to ask for more data.
+		// Otherwise we just poll again and wait for one to be produced...
+		if partialTrailingMessage {
+			if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max {
+				// we can't ask for more data, we've hit the configured limit
+				child.sendError(ErrMessageTooLarge)
+				child.offset++ // skip this one so we can keep processing future messages
+			} else {
+				child.fetchSize *= 2
+				if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max {
+					child.fetchSize = child.conf.Consumer.Fetch.Max
+				}
+			}
+		}
+
+		return nil, nil
+	}
+
+	// we got messages, reset our fetch size in case it was increased for a previous request
+	child.fetchSize = child.conf.Consumer.Fetch.Default
+	atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset)
+
+	if control, err := block.Records.isControl(); err != nil || control {
+		return nil, err
+	}
+
+	if block.Records.recordsType == legacyRecords {
+		return child.parseMessages(block.Records.msgSet)
+	}
+	return child.parseRecords(block.Records.recordBatch)
+}
+
 // brokerConsumer
 
 type brokerConsumer struct {

@@ -726,6 +789,14 @@ func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) {
 	if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) {
 		request.Version = 2
 	}
+	if bc.consumer.conf.Version.IsAtLeast(V0_10_1_0) {
+		request.Version = 3
+		request.MaxBytes = MaxResponseSize
+	}
+	if bc.consumer.conf.Version.IsAtLeast(V0_11_0_0) {
+		request.Version = 4
+		request.Isolation = ReadUncommitted // We don't support transactions yet.
+	}
 
 	for child := range bc.subscriptions {
 		request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize)
vendor/src/gopkg.in/Shopify/sarama.v1/consumer_group_members_test.go (vendored)

@@ -51,7 +51,7 @@ func TestConsumerGroupMemberAssignment(t *testing.T) {
 	amt := &ConsumerGroupMemberAssignment{
 		Version: 1,
 		Topics: map[string][]int32{
-			"one": []int32{0, 2, 4},
+			"one": {0, 2, 4},
 		},
 		UserData: []byte{0x01, 0x02, 0x03},
 	}
vendor/src/gopkg.in/Shopify/sarama.v1/consumer_test.go (vendored)

@@ -379,25 +379,41 @@ func TestConsumerShutsDownOutOfRange(t *testing.T) {
 // requested, then such messages are ignored.
 func TestConsumerExtraOffsets(t *testing.T) {
 	// Given
+	legacyFetchResponse := &FetchResponse{}
+	legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 1)
+	legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 2)
+	legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 3)
+	legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 4)
+	newFetchResponse := &FetchResponse{Version: 4}
+	newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 1)
+	newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 2)
+	newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 3)
+	newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 4)
+	newFetchResponse.SetLastStableOffset("my_topic", 0, 4)
+	for _, fetchResponse1 := range []*FetchResponse{legacyFetchResponse, newFetchResponse} {
+		var offsetResponseVersion int16
+		cfg := NewConfig()
+		if fetchResponse1.Version >= 4 {
+			cfg.Version = V0_11_0_0
+			offsetResponseVersion = 1
+		}
+
 	broker0 := NewMockBroker(t, 0)
-	fetchResponse1 := &FetchResponse{}
-	fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 1)
-	fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 2)
-	fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 3)
-	fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 4)
 	fetchResponse2 := &FetchResponse{}
+	fetchResponse2.Version = fetchResponse1.Version
 	fetchResponse2.AddError("my_topic", 0, ErrNoError)
 	broker0.SetHandlerByMap(map[string]MockResponse{
 		"MetadataRequest": NewMockMetadataResponse(t).
 			SetBroker(broker0.Addr(), broker0.BrokerID()).
 			SetLeader("my_topic", 0, broker0.BrokerID()),
 		"OffsetRequest": NewMockOffsetResponse(t).
+			SetVersion(offsetResponseVersion).
 			SetOffset("my_topic", 0, OffsetNewest, 1234).
 			SetOffset("my_topic", 0, OffsetOldest, 0),
 		"FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2),
 	})
 
-	master, err := NewConsumer([]string{broker0.Addr()}, nil)
+	master, err := NewConsumer([]string{broker0.Addr()}, cfg)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -417,29 +433,87 @@ func TestConsumerExtraOffsets(t *testing.T) {
 	safeClose(t, master)
 	broker0.Close()
+	}
 }
 
-// It is fine if offsets of fetched messages are not sequential (although
-// strictly increasing!).
-func TestConsumerNonSequentialOffsets(t *testing.T) {
+func TestConsumeMessageWithNewerFetchAPIVersion(t *testing.T) {
 	// Given
+	fetchResponse1 := &FetchResponse{Version: 4}
+	fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 1)
+	fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 2)
+
+	cfg := NewConfig()
+	cfg.Version = V0_11_0_0
+
 	broker0 := NewMockBroker(t, 0)
-	fetchResponse1 := &FetchResponse{}
-	fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 5)
-	fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 7)
-	fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 11)
 	fetchResponse2 := &FetchResponse{}
+	fetchResponse2.Version = 4
 	fetchResponse2.AddError("my_topic", 0, ErrNoError)
 	broker0.SetHandlerByMap(map[string]MockResponse{
 		"MetadataRequest": NewMockMetadataResponse(t).
 			SetBroker(broker0.Addr(), broker0.BrokerID()).
 			SetLeader("my_topic", 0, broker0.BrokerID()),
 		"OffsetRequest": NewMockOffsetResponse(t).
+			SetVersion(1).
 			SetOffset("my_topic", 0, OffsetNewest, 1234).
 			SetOffset("my_topic", 0, OffsetOldest, 0),
 		"FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2),
 	})
 
-	master, err := NewConsumer([]string{broker0.Addr()}, nil)
+	master, err := NewConsumer([]string{broker0.Addr()}, cfg)
 	if err != nil {
 		t.Fatal(err)
 	}
 
 	// When
 	consumer, err := master.ConsumePartition("my_topic", 0, 1)
 	if err != nil {
 		t.Fatal(err)
 	}
 
+	assertMessageOffset(t, <-consumer.Messages(), 1)
+	assertMessageOffset(t, <-consumer.Messages(), 2)
+
+	safeClose(t, consumer)
+	safeClose(t, master)
+	broker0.Close()
+}
+
+// It is fine if offsets of fetched messages are not sequential (although
+// strictly increasing!).
+func TestConsumerNonSequentialOffsets(t *testing.T) {
+	// Given
+	legacyFetchResponse := &FetchResponse{}
+	legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 5)
+	legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 7)
+	legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 11)
+	newFetchResponse := &FetchResponse{Version: 4}
+	newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 5)
+	newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 7)
+	newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 11)
+	newFetchResponse.SetLastStableOffset("my_topic", 0, 11)
+	for _, fetchResponse1 := range []*FetchResponse{legacyFetchResponse, newFetchResponse} {
+		var offsetResponseVersion int16
+		cfg := NewConfig()
+		if fetchResponse1.Version >= 4 {
+			cfg.Version = V0_11_0_0
+			offsetResponseVersion = 1
+		}
+
+		broker0 := NewMockBroker(t, 0)
+		fetchResponse2 := &FetchResponse{Version: fetchResponse1.Version}
+		fetchResponse2.AddError("my_topic", 0, ErrNoError)
+		broker0.SetHandlerByMap(map[string]MockResponse{
+			"MetadataRequest": NewMockMetadataResponse(t).
+				SetBroker(broker0.Addr(), broker0.BrokerID()).
+				SetLeader("my_topic", 0, broker0.BrokerID()),
+			"OffsetRequest": NewMockOffsetResponse(t).
+				SetVersion(offsetResponseVersion).
+				SetOffset("my_topic", 0, OffsetNewest, 1234).
+				SetOffset("my_topic", 0, OffsetOldest, 0),
+			"FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2),
+		})
 
 	master, err := NewConsumer([]string{broker0.Addr()}, cfg)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -460,6 +534,7 @@ func TestConsumerNonSequentialOffsets(t *testing.T) {
 	safeClose(t, master)
 	broker0.Close()
+	}
 }
 
 // If leadership for a partition is changing then consumer resolves the new
 // leader and switches to it.

@@ -803,6 +878,48 @@ func TestConsumerOffsetOutOfRange(t *testing.T) {
 	broker0.Close()
 }
 
+func TestConsumerExpiryTicker(t *testing.T) {
+	// Given
+	broker0 := NewMockBroker(t, 0)
+	fetchResponse1 := &FetchResponse{}
+	for i := 1; i <= 8; i++ {
+		fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, int64(i))
+	}
+	broker0.SetHandlerByMap(map[string]MockResponse{
+		"MetadataRequest": NewMockMetadataResponse(t).
+			SetBroker(broker0.Addr(), broker0.BrokerID()).
+			SetLeader("my_topic", 0, broker0.BrokerID()),
+		"OffsetRequest": NewMockOffsetResponse(t).
+			SetOffset("my_topic", 0, OffsetNewest, 1234).
+			SetOffset("my_topic", 0, OffsetOldest, 1),
+		"FetchRequest": NewMockSequence(fetchResponse1),
+	})
+
+	config := NewConfig()
+	config.ChannelBufferSize = 0
+	config.Consumer.MaxProcessingTime = 10 * time.Millisecond
+	master, err := NewConsumer([]string{broker0.Addr()}, config)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// When
+	consumer, err := master.ConsumePartition("my_topic", 0, 1)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Then: messages with offsets 1 through 8 are read
+	for i := 1; i <= 8; i++ {
+		assertMessageOffset(t, <-consumer.Messages(), int64(i))
+		time.Sleep(2 * time.Millisecond)
+	}
+
+	safeClose(t, consumer)
+	safeClose(t, master)
+	broker0.Close()
+}
+
 func assertMessageOffset(t *testing.T, msg *ConsumerMessage, expectedOffset int64) {
 	if msg.Offset != expectedOffset {
 		t.Errorf("Incorrect message offset: expected=%d, actual=%d", expectedOffset, msg.Offset)
vendor/src/gopkg.in/Shopify/sarama.v1/crc32_field.go (vendored)

@@ -2,13 +2,23 @@ package sarama
 
 import (
 	"encoding/binary"
-
-	"github.com/klauspost/crc32"
+	"fmt"
+	"hash/crc32"
 )
 
+type crcPolynomial int8
+
+const (
+	crcIEEE crcPolynomial = iota
+	crcCastagnoli
+)
+
+var castagnoliTable = crc32.MakeTable(crc32.Castagnoli)
+
 // crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s.
 type crc32Field struct {
 	startOffset int
+	polynomial  crcPolynomial
 }
 
 func (c *crc32Field) saveOffset(in int) {

@@ -19,18 +29,41 @@ func (c *crc32Field) reserveLength() int {
 	return 4
 }
 
+func newCRC32Field(polynomial crcPolynomial) *crc32Field {
+	return &crc32Field{polynomial: polynomial}
+}
+
 func (c *crc32Field) run(curOffset int, buf []byte) error {
-	crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset])
+	crc, err := c.crc(curOffset, buf)
+	if err != nil {
+		return err
+	}
 	binary.BigEndian.PutUint32(buf[c.startOffset:], crc)
 	return nil
 }
 
func (c *crc32Field) check(curOffset int, buf []byte) error {
-	crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset])
+	crc, err := c.crc(curOffset, buf)
+	if err != nil {
+		return err
+	}
 
-	if crc != binary.BigEndian.Uint32(buf[c.startOffset:]) {
-		return PacketDecodingError{"CRC didn't match"}
+	expected := binary.BigEndian.Uint32(buf[c.startOffset:])
+	if crc != expected {
+		return PacketDecodingError{fmt.Sprintf("CRC didn't match expected %#x got %#x", expected, crc)}
 	}
 
 	return nil
 }
+
+func (c *crc32Field) crc(curOffset int, buf []byte) (uint32, error) {
+	var tab *crc32.Table
+	switch c.polynomial {
+	case crcIEEE:
+		tab = crc32.IEEETable
+	case crcCastagnoli:
+		tab = castagnoliTable
+	default:
+		return 0, PacketDecodingError{"invalid CRC type"}
+	}
+	return crc32.Checksum(buf[c.startOffset+4:curOffset], tab), nil
}
|
||||
|
|
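The polynomial split above maps directly onto Go's standard `hash/crc32` package: IEEE is the checksum used by the classic v0/v1 message format, while the v2 record-batch format uses Castagnoli (CRC-32C). A standalone sketch of the two checksums the new `crc` helper selects between (plain stdlib, not part of this commit):

```go
package main

import (
	"fmt"
	"hash/crc32"
)

func main() {
	data := []byte("kafka record bytes")

	// IEEE polynomial, as used by the classic v0/v1 message format.
	ieee := crc32.ChecksumIEEE(data)

	// Castagnoli (CRC-32C), as used by the v2 record-batch format;
	// the standard library requires an explicit table for it.
	castagnoli := crc32.Checksum(data, crc32.MakeTable(crc32.Castagnoli))

	fmt.Printf("IEEE: %#x  Castagnoli: %#x\n", ieee, castagnoli)
}
```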
121 vendor/src/gopkg.in/Shopify/sarama.v1/create_partitions_request.go vendored Normal file

@@ -0,0 +1,121 @@
package sarama

import "time"

type CreatePartitionsRequest struct {
	TopicPartitions map[string]*TopicPartition
	Timeout         time.Duration
	ValidateOnly    bool
}

func (c *CreatePartitionsRequest) encode(pe packetEncoder) error {
	if err := pe.putArrayLength(len(c.TopicPartitions)); err != nil {
		return err
	}

	for topic, partition := range c.TopicPartitions {
		if err := pe.putString(topic); err != nil {
			return err
		}
		if err := partition.encode(pe); err != nil {
			return err
		}
	}

	pe.putInt32(int32(c.Timeout / time.Millisecond))

	pe.putBool(c.ValidateOnly)

	return nil
}

func (c *CreatePartitionsRequest) decode(pd packetDecoder, version int16) (err error) {
	n, err := pd.getArrayLength()
	if err != nil {
		return err
	}
	c.TopicPartitions = make(map[string]*TopicPartition, n)
	for i := 0; i < n; i++ {
		topic, err := pd.getString()
		if err != nil {
			return err
		}
		c.TopicPartitions[topic] = new(TopicPartition)
		if err := c.TopicPartitions[topic].decode(pd, version); err != nil {
			return err
		}
	}

	timeout, err := pd.getInt32()
	if err != nil {
		return err
	}
	c.Timeout = time.Duration(timeout) * time.Millisecond

	if c.ValidateOnly, err = pd.getBool(); err != nil {
		return err
	}

	return nil
}

func (r *CreatePartitionsRequest) key() int16 {
	return 37
}

func (r *CreatePartitionsRequest) version() int16 {
	return 0
}

func (r *CreatePartitionsRequest) requiredVersion() KafkaVersion {
	return V1_0_0_0
}

type TopicPartition struct {
	Count      int32
	Assignment [][]int32
}

func (t *TopicPartition) encode(pe packetEncoder) error {
	pe.putInt32(t.Count)

	if len(t.Assignment) == 0 {
		pe.putInt32(-1)
		return nil
	}

	if err := pe.putArrayLength(len(t.Assignment)); err != nil {
		return err
	}

	for _, assign := range t.Assignment {
		if err := pe.putInt32Array(assign); err != nil {
			return err
		}
	}

	return nil
}

func (t *TopicPartition) decode(pd packetDecoder, version int16) (err error) {
	if t.Count, err = pd.getInt32(); err != nil {
		return err
	}

	n, err := pd.getInt32()
	if err != nil {
		return err
	}
	if n <= 0 {
		return nil
	}
	t.Assignment = make([][]int32, n)

	for i := 0; i < int(n); i++ {
		if t.Assignment[i], err = pd.getInt32Array(); err != nil {
			return err
		}
	}

	return nil
}
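CreatePartitions (API key 37) maps each topic to a target partition `Count` plus an optional replica `Assignment`, one inner slice per new partition. A minimal construction sketch, mirroring the test file that follows (all values are illustrative):

```go
req := &CreatePartitionsRequest{
	TopicPartitions: map[string]*TopicPartition{
		// Grow "topic" to 3 partitions, placing the new partitions'
		// replicas explicitly: one inner slice per added partition.
		"topic": {Count: 3, Assignment: [][]int32{{2, 3}, {3, 1}}},
	},
	Timeout:      100 * time.Millisecond,
	ValidateOnly: true, // dry run: the broker validates but does not apply
}
```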
50 vendor/src/gopkg.in/Shopify/sarama.v1/create_partitions_request_test.go vendored Normal file

@@ -0,0 +1,50 @@
package sarama

import (
	"testing"
	"time"
)

var (
	createPartitionRequestNoAssignment = []byte{
		0, 0, 0, 1, // one topic
		0, 5, 't', 'o', 'p', 'i', 'c',
		0, 0, 0, 3, // 3 partitions
		255, 255, 255, 255, // no assignments
		0, 0, 0, 100, // timeout
		0, // validate only = false
	}

	createPartitionRequestAssignment = []byte{
		0, 0, 0, 1,
		0, 5, 't', 'o', 'p', 'i', 'c',
		0, 0, 0, 3, // 3 partitions
		0, 0, 0, 2,
		0, 0, 0, 2,
		0, 0, 0, 2, 0, 0, 0, 3,
		0, 0, 0, 2,
		0, 0, 0, 3, 0, 0, 0, 1,
		0, 0, 0, 100,
		1, // validate only = true
	}
)

func TestCreatePartitionsRequest(t *testing.T) {
	req := &CreatePartitionsRequest{
		TopicPartitions: map[string]*TopicPartition{
			"topic": &TopicPartition{
				Count: 3,
			},
		},
		Timeout: 100 * time.Millisecond,
	}

	buf := testRequestEncode(t, "no assignment", req, createPartitionRequestNoAssignment)
	testRequestDecode(t, "no assignment", req, buf)

	req.ValidateOnly = true
	req.TopicPartitions["topic"].Assignment = [][]int32{{2, 3}, {3, 1}}

	buf = testRequestEncode(t, "assignment", req, createPartitionRequestAssignment)
	testRequestDecode(t, "assignment", req, buf)
}
94 vendor/src/gopkg.in/Shopify/sarama.v1/create_partitions_response.go vendored Normal file

@@ -0,0 +1,94 @@
package sarama

import "time"

type CreatePartitionsResponse struct {
	ThrottleTime         time.Duration
	TopicPartitionErrors map[string]*TopicPartitionError
}

func (c *CreatePartitionsResponse) encode(pe packetEncoder) error {
	pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
	if err := pe.putArrayLength(len(c.TopicPartitionErrors)); err != nil {
		return err
	}

	for topic, partitionError := range c.TopicPartitionErrors {
		if err := pe.putString(topic); err != nil {
			return err
		}
		if err := partitionError.encode(pe); err != nil {
			return err
		}
	}

	return nil
}

func (c *CreatePartitionsResponse) decode(pd packetDecoder, version int16) (err error) {
	throttleTime, err := pd.getInt32()
	if err != nil {
		return err
	}
	c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond

	n, err := pd.getArrayLength()
	if err != nil {
		return err
	}

	c.TopicPartitionErrors = make(map[string]*TopicPartitionError, n)
	for i := 0; i < n; i++ {
		topic, err := pd.getString()
		if err != nil {
			return err
		}
		c.TopicPartitionErrors[topic] = new(TopicPartitionError)
		if err := c.TopicPartitionErrors[topic].decode(pd, version); err != nil {
			return err
		}
	}

	return nil
}

func (r *CreatePartitionsResponse) key() int16 {
	return 37
}

func (r *CreatePartitionsResponse) version() int16 {
	return 0
}

func (r *CreatePartitionsResponse) requiredVersion() KafkaVersion {
	return V1_0_0_0
}

type TopicPartitionError struct {
	Err    KError
	ErrMsg *string
}

func (t *TopicPartitionError) encode(pe packetEncoder) error {
	pe.putInt16(int16(t.Err))

	if err := pe.putNullableString(t.ErrMsg); err != nil {
		return err
	}

	return nil
}

func (t *TopicPartitionError) decode(pd packetDecoder, version int16) (err error) {
	kerr, err := pd.getInt16()
	if err != nil {
		return err
	}
	t.Err = KError(kerr)

	if t.ErrMsg, err = pd.getNullableString(); err != nil {
		return err
	}

	return nil
}
52 vendor/src/gopkg.in/Shopify/sarama.v1/create_partitions_response_test.go vendored Normal file

@@ -0,0 +1,52 @@
package sarama

import (
	"reflect"
	"testing"
	"time"
)

var (
	createPartitionResponseSuccess = []byte{
		0, 0, 0, 100, // throttleTimeMs
		0, 0, 0, 1,
		0, 5, 't', 'o', 'p', 'i', 'c',
		0, 0, // no error
		255, 255, // no error message
	}

	createPartitionResponseFail = []byte{
		0, 0, 0, 100, // throttleTimeMs
		0, 0, 0, 1,
		0, 5, 't', 'o', 'p', 'i', 'c',
		0, 37, // partition error
		0, 5, 'e', 'r', 'r', 'o', 'r',
	}
)

func TestCreatePartitionsResponse(t *testing.T) {
	resp := &CreatePartitionsResponse{
		ThrottleTime: 100 * time.Millisecond,
		TopicPartitionErrors: map[string]*TopicPartitionError{
			"topic": &TopicPartitionError{},
		},
	}

	testResponse(t, "success", resp, createPartitionResponseSuccess)
	decodedresp := new(CreatePartitionsResponse)
	testVersionDecodable(t, "success", decodedresp, createPartitionResponseSuccess, 0)
	if !reflect.DeepEqual(decodedresp, resp) {
		t.Errorf("Decoding error: expected %v but got %v", resp, decodedresp)
	}

	errMsg := "error"
	resp.TopicPartitionErrors["topic"].Err = ErrInvalidPartitions
	resp.TopicPartitionErrors["topic"].ErrMsg = &errMsg

	testResponse(t, "with errors", resp, createPartitionResponseFail)
	decodedresp = new(CreatePartitionsResponse)
	testVersionDecodable(t, "with errors", decodedresp, createPartitionResponseFail, 0)
	if !reflect.DeepEqual(decodedresp, resp) {
		t.Errorf("Decoding error: expected %v but got %v", resp, decodedresp)
	}
}
@@ -89,12 +89,13 @@ func (gd *GroupDescription) encode(pe packetEncoder) error {
}

func (gd *GroupDescription) decode(pd packetDecoder) (err error) {
	if kerr, err := pd.getInt16(); err != nil {
	kerr, err := pd.getInt16()
	if err != nil {
		return err
	} else {
		gd.Err = KError(kerr)
	}

	gd.Err = KError(kerr)

	if gd.GroupId, err = pd.getString(); err != nil {
		return
	}
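The same decode refactor recurs below for the heartbeat, join-group, leave-group, and list-groups responses. The motivation is Go's `if`-initializer scoping: variables declared there exist only inside the if/else, so the success path was forced into an `else` branch. A minimal standalone illustration (not part of the commit):

```go
package main

import "fmt"

func getInt16() (int16, error) { return 42, nil }

func main() {
	// Old shape: kerr is scoped to the if/else, so the assignment
	// has to live in the else branch.
	if kerr, err := getInt16(); err != nil {
		fmt.Println(err)
	} else {
		fmt.Println("value:", kerr)
	}

	// New shape: a guard clause, with kerr still in scope afterwards.
	kerr, err := getInt16()
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("value:", kerr)
}
```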
@@ -1,13 +1,10 @@
name: sarama

up:
- go: 1.7.3
- go:
    version: '1.9'

commands:
  test:
    run: make test
    desc: 'run unit tests'

packages:
- git@github.com:Shopify/dev-shopify.git
74 vendor/src/gopkg.in/Shopify/sarama.v1/errors.go vendored

@@ -108,12 +108,36 @@ const (
	ErrUnsupportedSASLMechanism           KError = 33
	ErrIllegalSASLState                   KError = 34
	ErrUnsupportedVersion                 KError = 35
	ErrTopicAlreadyExists                 KError = 36
	ErrInvalidPartitions                  KError = 37
	ErrInvalidReplicationFactor           KError = 38
	ErrInvalidReplicaAssignment           KError = 39
	ErrInvalidConfig                      KError = 40
	ErrNotController                      KError = 41
	ErrInvalidRequest                     KError = 42
	ErrUnsupportedForMessageFormat        KError = 43
	ErrPolicyViolation                    KError = 44
	ErrOutOfOrderSequenceNumber           KError = 45
	ErrDuplicateSequenceNumber            KError = 46
	ErrInvalidProducerEpoch               KError = 47
	ErrInvalidTxnState                    KError = 48
	ErrInvalidProducerIDMapping           KError = 49
	ErrInvalidTransactionTimeout          KError = 50
	ErrConcurrentTransactions             KError = 51
	ErrTransactionCoordinatorFenced       KError = 52
	ErrTransactionalIDAuthorizationFailed KError = 53
	ErrSecurityDisabled                   KError = 54
	ErrOperationNotAttempted              KError = 55
	ErrKafkaStorageError                  KError = 56
	ErrLogDirNotFound                     KError = 57
	ErrSASLAuthenticationFailed           KError = 58
	ErrUnknownProducerID                  KError = 59
	ErrReassignmentInProgress             KError = 60
)

func (err KError) Error() string {
	// Error messages stolen/adapted from
	// https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol
	// https://kafka.apache.org/protocol#protocol_error_codes
	switch err {
	case ErrNoError:
		return "kafka server: Not an error, why are you printing me?"

@@ -189,8 +213,56 @@ func (err KError) Error() string {
		return "kafka server: Request is not valid given the current SASL state."
	case ErrUnsupportedVersion:
		return "kafka server: The version of API is not supported."
	case ErrTopicAlreadyExists:
		return "kafka server: Topic with this name already exists."
	case ErrInvalidPartitions:
		return "kafka server: Number of partitions is invalid."
	case ErrInvalidReplicationFactor:
		return "kafka server: Replication-factor is invalid."
	case ErrInvalidReplicaAssignment:
		return "kafka server: Replica assignment is invalid."
	case ErrInvalidConfig:
		return "kafka server: Configuration is invalid."
	case ErrNotController:
		return "kafka server: This is not the correct controller for this cluster."
	case ErrInvalidRequest:
		return "kafka server: This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. See the broker logs for more details."
	case ErrUnsupportedForMessageFormat:
		return "kafka server: The requested operation is not supported by the message format version."
	case ErrPolicyViolation:
		return "kafka server: Request parameters do not satisfy the configured policy."
	case ErrOutOfOrderSequenceNumber:
		return "kafka server: The broker received an out of order sequence number."
	case ErrDuplicateSequenceNumber:
		return "kafka server: The broker received a duplicate sequence number."
	case ErrInvalidProducerEpoch:
		return "kafka server: Producer attempted an operation with an old epoch."
	case ErrInvalidTxnState:
		return "kafka server: The producer attempted a transactional operation in an invalid state."
	case ErrInvalidProducerIDMapping:
		return "kafka server: The producer attempted to use a producer id which is not currently assigned to its transactional id."
	case ErrInvalidTransactionTimeout:
		return "kafka server: The transaction timeout is larger than the maximum value allowed by the broker (as configured by max.transaction.timeout.ms)."
	case ErrConcurrentTransactions:
		return "kafka server: The producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing."
	case ErrTransactionCoordinatorFenced:
		return "kafka server: The transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer."
	case ErrTransactionalIDAuthorizationFailed:
		return "kafka server: Transactional ID authorization failed."
	case ErrSecurityDisabled:
		return "kafka server: Security features are disabled."
	case ErrOperationNotAttempted:
		return "kafka server: The broker did not attempt to execute this operation."
	case ErrKafkaStorageError:
		return "kafka server: Disk error when trying to access log file on the disk."
	case ErrLogDirNotFound:
		return "kafka server: The specified log directory is not found in the broker config."
	case ErrSASLAuthenticationFailed:
		return "kafka server: SASL Authentication failed."
	case ErrUnknownProducerID:
		return "kafka server: The broker could not locate the producer metadata associated with the Producer ID."
	case ErrReassignmentInProgress:
		return "kafka server: A partition reassignment is in progress."
	}

	return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err)
@@ -1,6 +1,6 @@
# Sarama examples

This folder contains example applications to demonstrate the use of Sarama. For code snippet examples on how to use the different types in Sarama, see [Sarams's API documentation on godoc.org](https://godoc.org/github.com/Shopify/sarama)
This folder contains example applications to demonstrate the use of Sarama. For code snippet examples on how to use the different types in Sarama, see [Sarama's API documentation on godoc.org](https://godoc.org/github.com/Shopify/sarama)

In these examples, we use `github.com/Shopify/sarama` as import path. We do this to ensure all the examples are up to date with the latest changes in Sarama. For your own applications, you may want to use `gopkg.in/Shopify/sarama.v1` to lock into a stable API version.
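A small sketch of the import-path distinction the README describes; either path is referred to as `sarama` in code (the gopkg.in line is shown commented because a program imports one or the other, not both):

```go
package main

import "github.com/Shopify/sarama" // tracks master, as the repo's examples do

// For a stable v1 API, an application would instead use:
//   import "gopkg.in/Shopify/sarama.v1"

func main() {
	cfg := sarama.NewConfig()
	_ = cfg // configure producer/consumer settings here
}
```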
@@ -21,17 +21,35 @@ func (b *fetchRequestBlock) decode(pd packetDecoder) (err error) {
	return nil
}

// FetchRequest (API key 1) will fetch Kafka messages. Version 3 introduced the MaxBytes field. See
// https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that. The KIP is at
// https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes
type FetchRequest struct {
	MaxWaitTime int32
	MinBytes    int32
	MaxBytes    int32
	Version     int16
	Isolation   IsolationLevel
	blocks      map[string]map[int32]*fetchRequestBlock
}

type IsolationLevel int8

const (
	ReadUncommitted IsolationLevel = 0
	ReadCommitted   IsolationLevel = 1
)
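How the versioned fields combine, as a hedged sketch: `AddBlock` is the existing helper exercised in the test file further down, and the numeric values here are illustrative only.

```go
req := &FetchRequest{
	Version:     4,             // v3 added MaxBytes; v4 adds Isolation
	MaxWaitTime: 250,           // how long the broker may block, in ms
	MinBytes:    1,             // respond as soon as any data is available
	MaxBytes:    1 << 20,       // total response size cap (v3+)
	Isolation:   ReadCommitted, // hide records from aborted transactions (v4+)
}
// topic, partition, fetch offset, per-partition max bytes:
req.AddBlock("my_topic", 0, 0, 32768)
```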
func (r *FetchRequest) encode(pe packetEncoder) (err error) {
	pe.putInt32(-1) // replica ID is always -1 for clients
	pe.putInt32(r.MaxWaitTime)
	pe.putInt32(r.MinBytes)
	if r.Version >= 3 {
		pe.putInt32(r.MaxBytes)
	}
	if r.Version >= 4 {
		pe.putInt8(int8(r.Isolation))
	}
	err = pe.putArrayLength(len(r.blocks))
	if err != nil {
		return err

@@ -67,6 +85,18 @@ func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) {
	if r.MinBytes, err = pd.getInt32(); err != nil {
		return err
	}
	if r.Version >= 3 {
		if r.MaxBytes, err = pd.getInt32(); err != nil {
			return err
		}
	}
	if r.Version >= 4 {
		isolation, err := pd.getInt8()
		if err != nil {
			return err
		}
		r.Isolation = IsolationLevel(isolation)
	}
	topicCount, err := pd.getArrayLength()
	if err != nil {
		return err

@@ -92,7 +122,7 @@ func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) {
		}
		fetchBlock := &fetchRequestBlock{}
		if err = fetchBlock.decode(pd); err != nil {
			return nil
			return err
		}
		r.blocks[topic][partition] = fetchBlock
	}

@@ -114,6 +144,10 @@ func (r *FetchRequest) requiredVersion() KafkaVersion {
		return V0_9_0_0
	case 2:
		return V0_10_0_0
	case 3:
		return V0_10_1_0
	case 4:
		return V0_11_0_0
	default:
		return minVersion
	}
@@ -17,6 +17,15 @@ var (
		0x00, 0x05, 't', 'o', 'p', 'i', 'c',
		0x00, 0x00, 0x00, 0x01,
		0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x56}

	fetchRequestOneBlockV4 = []byte{
		0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0xFF,
		0x01,
		0x00, 0x00, 0x00, 0x01,
		0x00, 0x05, 't', 'o', 'p', 'i', 'c',
		0x00, 0x00, 0x00, 0x01,
		0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x56}
)

func TestFetchRequest(t *testing.T) {

@@ -31,4 +40,9 @@ func TestFetchRequest(t *testing.T) {
	request.MinBytes = 0
	request.AddBlock("topic", 0x12, 0x34, 0x56)
	testRequest(t, "one block", request, fetchRequestOneBlock)

	request.Version = 4
	request.MaxBytes = 0xFF
	request.Isolation = ReadCommitted
	testRequest(t, "one block v4", request, fetchRequestOneBlockV4)
}
@@ -2,13 +2,39 @@ package sarama

import "time"

type AbortedTransaction struct {
	ProducerID  int64
	FirstOffset int64
}

func (t *AbortedTransaction) decode(pd packetDecoder) (err error) {
	if t.ProducerID, err = pd.getInt64(); err != nil {
		return err
	}

	if t.FirstOffset, err = pd.getInt64(); err != nil {
		return err
	}

	return nil
}

func (t *AbortedTransaction) encode(pe packetEncoder) (err error) {
	pe.putInt64(t.ProducerID)
	pe.putInt64(t.FirstOffset)

	return nil
}

type FetchResponseBlock struct {
	Err                 KError
	HighWaterMarkOffset int64
	MsgSet              MessageSet
	LastStableOffset    int64
	AbortedTransactions []*AbortedTransaction
	Records             Records
}

func (b *FetchResponseBlock) decode(pd packetDecoder) (err error) {
func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) {
	tmp, err := pd.getInt16()
	if err != nil {
		return err

@@ -20,27 +46,68 @@ func (b *FetchResponseBlock) decode(pd packetDecoder) (err error) {
		return err
	}

	msgSetSize, err := pd.getInt32()
	if version >= 4 {
		b.LastStableOffset, err = pd.getInt64()
		if err != nil {
			return err
		}

	msgSetDecoder, err := pd.getSubset(int(msgSetSize))
		numTransact, err := pd.getArrayLength()
		if err != nil {
			return err
		}
	err = (&b.MsgSet).decode(msgSetDecoder)

		if numTransact >= 0 {
			b.AbortedTransactions = make([]*AbortedTransaction, numTransact)
		}

		for i := 0; i < numTransact; i++ {
			transact := new(AbortedTransaction)
			if err = transact.decode(pd); err != nil {
				return err
			}
			b.AbortedTransactions[i] = transact
		}
	}

	recordsSize, err := pd.getInt32()
	if err != nil {
		return err
	}

func (b *FetchResponseBlock) encode(pe packetEncoder) (err error) {
	recordsDecoder, err := pd.getSubset(int(recordsSize))
	if err != nil {
		return err
	}
	if recordsSize > 0 {
		if err = b.Records.decode(recordsDecoder); err != nil {
			return err
		}
	}

	return nil
}

func (b *FetchResponseBlock) encode(pe packetEncoder, version int16) (err error) {
	pe.putInt16(int16(b.Err))

	pe.putInt64(b.HighWaterMarkOffset)

	if version >= 4 {
		pe.putInt64(b.LastStableOffset)

		if err = pe.putArrayLength(len(b.AbortedTransactions)); err != nil {
			return err
		}
		for _, transact := range b.AbortedTransactions {
			if err = transact.encode(pe); err != nil {
				return err
			}
		}
	}

	pe.push(&lengthField{})
	err = b.MsgSet.encode(pe)
	err = b.Records.encode(pe)
	if err != nil {
		return err
	}

@@ -90,7 +157,7 @@ func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) {
		}

		block := new(FetchResponseBlock)
		err = block.decode(pd)
		err = block.decode(pd, version)
		if err != nil {
			return err
		}

@@ -124,7 +191,7 @@ func (r *FetchResponse) encode(pe packetEncoder) (err error) {

		for id, block := range partitions {
			pe.putInt32(id)
			err = block.encode(pe)
			err = block.encode(pe, r.Version)
			if err != nil {
				return err
			}

@@ -148,6 +215,10 @@ func (r *FetchResponse) requiredVersion() KafkaVersion {
		return V0_9_0_0
	case 2:
		return V0_10_0_0
	case 3:
		return V0_10_1_0
	case 4:
		return V0_11_0_0
	default:
		return minVersion
	}

@@ -182,7 +253,7 @@ func (r *FetchResponse) AddError(topic string, partition int32, err KError) {
	frb.Err = err
}

func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) {
func (r *FetchResponse) getOrCreateBlock(topic string, partition int32) *FetchResponseBlock {
	if r.Blocks == nil {
		r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
	}

@@ -196,6 +267,11 @@ func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Enc
		frb = new(FetchResponseBlock)
		partitions[partition] = frb
	}

	return frb
}

func encodeKV(key, value Encoder) ([]byte, []byte) {
	var kb []byte
	var vb []byte
	if key != nil {

@@ -204,7 +280,36 @@ func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Enc
	if value != nil {
		vb, _ = value.Encode()
	}

	return kb, vb
}

func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) {
	frb := r.getOrCreateBlock(topic, partition)
	kb, vb := encodeKV(key, value)
	msg := &Message{Key: kb, Value: vb}
	msgBlock := &MessageBlock{Msg: msg, Offset: offset}
	frb.MsgSet.Messages = append(frb.MsgSet.Messages, msgBlock)
	set := frb.Records.msgSet
	if set == nil {
		set = &MessageSet{}
		frb.Records = newLegacyRecords(set)
	}
	set.Messages = append(set.Messages, msgBlock)
}

func (r *FetchResponse) AddRecord(topic string, partition int32, key, value Encoder, offset int64) {
	frb := r.getOrCreateBlock(topic, partition)
	kb, vb := encodeKV(key, value)
	rec := &Record{Key: kb, Value: vb, OffsetDelta: offset}
	batch := frb.Records.recordBatch
	if batch == nil {
		batch = &RecordBatch{Version: 2}
		frb.Records = newDefaultRecords(batch)
	}
	batch.addRecord(rec)
}

func (r *FetchResponse) SetLastStableOffset(topic string, partition int32, offset int64) {
	frb := r.getOrCreateBlock(topic, partition)
	frb.LastStableOffset = offset
}
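These helpers let test code synthesize a fetch response in either wire format: `AddMessage` builds a legacy message set, while `AddRecord` builds a v2 record batch. A hedged sketch (`StringEncoder` is sarama's existing encoder type; a given partition block should stick to one format):

```go
legacy := &FetchResponse{}
legacy.AddMessage("my_topic", 0, nil, StringEncoder("old-style value"), 1)

v4 := &FetchResponse{Version: 4}
v4.AddRecord("my_topic", 0, nil, StringEncoder("record-batch value"), 0)
v4.SetLastStableOffset("my_topic", 0, 10)
```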
@@ -26,6 +26,63 @@ var (
		0x00,
		0xFF, 0xFF, 0xFF, 0xFF,
		0x00, 0x00, 0x00, 0x02, 0x00, 0xEE}

	oneRecordFetchResponse = []byte{
		0x00, 0x00, 0x00, 0x00, // ThrottleTime
		0x00, 0x00, 0x00, 0x01, // Number of Topics
		0x00, 0x05, 't', 'o', 'p', 'i', 'c', // Topic
		0x00, 0x00, 0x00, 0x01, // Number of Partitions
		0x00, 0x00, 0x00, 0x05, // Partition
		0x00, 0x01, // Error
		0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, // High Watermark Offset
		0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, // Last Stable Offset
		0x00, 0x00, 0x00, 0x00, // Number of Aborted Transactions
		0x00, 0x00, 0x00, 0x52, // Records length
		// recordBatch
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x46,
		0x00, 0x00, 0x00, 0x00,
		0x02,
		0xDB, 0x47, 0x14, 0xC9,
		0x00, 0x00,
		0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0A,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00,
		0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x01,
		// record
		0x28,
		0x00,
		0x0A,
		0x00,
		0x08, 0x01, 0x02, 0x03, 0x04,
		0x06, 0x05, 0x06, 0x07,
		0x02,
		0x06, 0x08, 0x09, 0x0A,
		0x04, 0x0B, 0x0C}

	oneMessageFetchResponseV4 = []byte{
		0x00, 0x00, 0x00, 0x00, // ThrottleTime
		0x00, 0x00, 0x00, 0x01, // Number of Topics
		0x00, 0x05, 't', 'o', 'p', 'i', 'c', // Topic
		0x00, 0x00, 0x00, 0x01, // Number of Partitions
		0x00, 0x00, 0x00, 0x05, // Partition
		0x00, 0x01, // Error
		0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, // High Watermark Offset
		0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, // Last Stable Offset
		0x00, 0x00, 0x00, 0x00, // Number of Aborted Transactions
		0x00, 0x00, 0x00, 0x1C,
		// messageSet
		0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x10,
		// message
		0x23, 0x96, 0x4a, 0xf7, // CRC
		0x00,
		0x00,
		0xFF, 0xFF, 0xFF, 0xFF,
		0x00, 0x00, 0x00, 0x02, 0x00, 0xEE}
)

func TestEmptyFetchResponse(t *testing.T) {

@@ -60,14 +117,121 @@ func TestOneMessageFetchResponse(t *testing.T) {
	if block.HighWaterMarkOffset != 0x10101010 {
		t.Error("Decoding didn't produce correct high water mark offset.")
	}
	if block.MsgSet.PartialTrailingMessage {
	partial, err := block.Records.isPartial()
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if partial {
		t.Error("Decoding detected a partial trailing message where there wasn't one.")
	}

	if len(block.MsgSet.Messages) != 1 {
	n, err := block.Records.numRecords()
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if n != 1 {
		t.Fatal("Decoding produced incorrect number of messages.")
	}
	msgBlock := block.MsgSet.Messages[0]
	msgBlock := block.Records.msgSet.Messages[0]
	if msgBlock.Offset != 0x550000 {
		t.Error("Decoding produced incorrect message offset.")
	}
	msg := msgBlock.Msg
	if msg.Codec != CompressionNone {
		t.Error("Decoding produced incorrect message compression.")
	}
	if msg.Key != nil {
		t.Error("Decoding produced message key where there was none.")
	}
	if !bytes.Equal(msg.Value, []byte{0x00, 0xEE}) {
		t.Error("Decoding produced incorrect message value.")
	}
}

func TestOneRecordFetchResponse(t *testing.T) {
	response := FetchResponse{}
	testVersionDecodable(t, "one record", &response, oneRecordFetchResponse, 4)

	if len(response.Blocks) != 1 {
		t.Fatal("Decoding produced incorrect number of topic blocks.")
	}

	if len(response.Blocks["topic"]) != 1 {
		t.Fatal("Decoding produced incorrect number of partition blocks for topic.")
	}

	block := response.GetBlock("topic", 5)
	if block == nil {
		t.Fatal("GetBlock didn't return block.")
	}
	if block.Err != ErrOffsetOutOfRange {
		t.Error("Decoding didn't produce correct error code.")
	}
	if block.HighWaterMarkOffset != 0x10101010 {
		t.Error("Decoding didn't produce correct high water mark offset.")
	}
	partial, err := block.Records.isPartial()
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if partial {
		t.Error("Decoding detected a partial trailing record where there wasn't one.")
	}

	n, err := block.Records.numRecords()
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if n != 1 {
		t.Fatal("Decoding produced incorrect number of records.")
	}
	rec := block.Records.recordBatch.Records[0]
	if !bytes.Equal(rec.Key, []byte{0x01, 0x02, 0x03, 0x04}) {
		t.Error("Decoding produced incorrect record key.")
	}
	if !bytes.Equal(rec.Value, []byte{0x05, 0x06, 0x07}) {
		t.Error("Decoding produced incorrect record value.")
	}
}

func TestOneMessageFetchResponseV4(t *testing.T) {
	response := FetchResponse{}
	testVersionDecodable(t, "one message v4", &response, oneMessageFetchResponseV4, 4)

	if len(response.Blocks) != 1 {
		t.Fatal("Decoding produced incorrect number of topic blocks.")
	}

	if len(response.Blocks["topic"]) != 1 {
		t.Fatal("Decoding produced incorrect number of partition blocks for topic.")
	}

	block := response.GetBlock("topic", 5)
	if block == nil {
		t.Fatal("GetBlock didn't return block.")
	}
	if block.Err != ErrOffsetOutOfRange {
		t.Error("Decoding didn't produce correct error code.")
	}
	if block.HighWaterMarkOffset != 0x10101010 {
		t.Error("Decoding didn't produce correct high water mark offset.")
	}
	partial, err := block.Records.isPartial()
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if partial {
		t.Error("Decoding detected a partial trailing record where there wasn't one.")
	}

	n, err := block.Records.numRecords()
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if n != 1 {
		t.Fatal("Decoding produced incorrect number of records.")
	}
	msgBlock := block.Records.msgSet.Messages[0]
	if msgBlock.Offset != 0x550000 {
		t.Error("Decoding produced incorrect message offset.")
	}
@@ -10,11 +10,11 @@ func (r *HeartbeatResponse) encode(pe packetEncoder) error {
}

func (r *HeartbeatResponse) decode(pd packetDecoder, version int16) error {
	if kerr, err := pd.getInt16(); err != nil {
	kerr, err := pd.getInt16()
	if err != nil {
		return err
	} else {
		r.Err = KError(kerr)
	}
	r.Err = KError(kerr)

	return nil
}
@@ -1,11 +1,36 @@
package sarama

type GroupProtocol struct {
	Name     string
	Metadata []byte
}

func (p *GroupProtocol) decode(pd packetDecoder) (err error) {
	p.Name, err = pd.getString()
	if err != nil {
		return err
	}
	p.Metadata, err = pd.getBytes()
	return err
}

func (p *GroupProtocol) encode(pe packetEncoder) (err error) {
	if err := pe.putString(p.Name); err != nil {
		return err
	}
	if err := pe.putBytes(p.Metadata); err != nil {
		return err
	}
	return nil
}

type JoinGroupRequest struct {
	GroupId        string
	SessionTimeout int32
	MemberId       string
	ProtocolType   string
	GroupProtocols map[string][]byte
	GroupProtocols        map[string][]byte // deprecated; use OrderedGroupProtocols
	OrderedGroupProtocols []*GroupProtocol
}

func (r *JoinGroupRequest) encode(pe packetEncoder) error {

@@ -20,6 +45,11 @@ func (r *JoinGroupRequest) encode(pe packetEncoder) error {
		return err
	}

	if len(r.GroupProtocols) > 0 {
		if len(r.OrderedGroupProtocols) > 0 {
			return PacketDecodingError{"cannot specify both GroupProtocols and OrderedGroupProtocols on JoinGroupRequest"}
		}

		if err := pe.putArrayLength(len(r.GroupProtocols)); err != nil {
			return err
		}

@@ -31,6 +61,16 @@ func (r *JoinGroupRequest) encode(pe packetEncoder) error {
			return err
		}
	}
	} else {
		if err := pe.putArrayLength(len(r.OrderedGroupProtocols)); err != nil {
			return err
		}
		for _, protocol := range r.OrderedGroupProtocols {
			if err := protocol.encode(pe); err != nil {
				return err
			}
		}
	}

	return nil
}

@@ -62,16 +102,12 @@ func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) {

	r.GroupProtocols = make(map[string][]byte)
	for i := 0; i < n; i++ {
		name, err := pd.getString()
		if err != nil {
		protocol := &GroupProtocol{}
		if err := protocol.decode(pd); err != nil {
			return err
		}
		metadata, err := pd.getBytes()
		if err != nil {
			return err
		}

		r.GroupProtocols[name] = metadata
		r.GroupProtocols[protocol.Name] = protocol.Metadata
		r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, protocol)
	}

	return nil

@@ -90,11 +126,10 @@ func (r *JoinGroupRequest) requiredVersion() KafkaVersion {
}

func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) {
	if r.GroupProtocols == nil {
		r.GroupProtocols = make(map[string][]byte)
	}

	r.GroupProtocols[name] = metadata
	r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, &GroupProtocol{
		Name:     name,
		Metadata: metadata,
	})
}
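Protocol preference in the join-group handshake is positional, which a Go map cannot preserve; the new `OrderedGroupProtocols` slice keeps the order in which `AddGroupProtocol` was called. A hedged usage sketch (the metadata byte slices are placeholders):

```go
req := &JoinGroupRequest{
	GroupId:        "my-group",
	SessionTimeout: 30000, // milliseconds
	ProtocolType:   "consumer",
}
// Appends preserve preference order on the wire: "range" is offered first.
req.AddGroupProtocol("range", []byte{ /* assignor metadata */ })
req.AddGroupProtocol("roundrobin", []byte{ /* assignor metadata */ })
```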

func (r *JoinGroupRequest) AddGroupProtocolMetadata(name string, metadata *ConsumerGroupMemberMetadata) error {
@@ -23,19 +23,35 @@ var (
)

func TestJoinGroupRequest(t *testing.T) {
	var request *JoinGroupRequest

	request = new(JoinGroupRequest)
	request := new(JoinGroupRequest)
	request.GroupId = "TestGroup"
	request.SessionTimeout = 100
	request.ProtocolType = "consumer"
	testRequest(t, "no protocols", request, joinGroupRequestNoProtocols)
}

	request = new(JoinGroupRequest)
func TestJoinGroupRequestOneProtocol(t *testing.T) {
	request := new(JoinGroupRequest)
	request.GroupId = "TestGroup"
	request.SessionTimeout = 100
	request.MemberId = "OneProtocol"
	request.ProtocolType = "consumer"
	request.AddGroupProtocol("one", []byte{0x01, 0x02, 0x03})
	testRequest(t, "one protocol", request, joinGroupRequestOneProtocol)
	packet := testRequestEncode(t, "one protocol", request, joinGroupRequestOneProtocol)
	request.GroupProtocols = make(map[string][]byte)
	request.GroupProtocols["one"] = []byte{0x01, 0x02, 0x03}
	testRequestDecode(t, "one protocol", request, packet)
}

func TestJoinGroupRequestDeprecatedEncode(t *testing.T) {
	request := new(JoinGroupRequest)
	request.GroupId = "TestGroup"
	request.SessionTimeout = 100
	request.MemberId = "OneProtocol"
	request.ProtocolType = "consumer"
	request.GroupProtocols = make(map[string][]byte)
	request.GroupProtocols["one"] = []byte{0x01, 0x02, 0x03}
	packet := testRequestEncode(t, "one protocol", request, joinGroupRequestOneProtocol)
	request.AddGroupProtocol("one", []byte{0x01, 0x02, 0x03})
	testRequestDecode(t, "one protocol", request, packet)
}
@@ -53,12 +53,13 @@ func (r *JoinGroupResponse) encode(pe packetEncoder) error {
}

func (r *JoinGroupResponse) decode(pd packetDecoder, version int16) (err error) {
	if kerr, err := pd.getInt16(); err != nil {
	kerr, err := pd.getInt16()
	if err != nil {
		return err
	} else {
		r.Err = KError(kerr)
	}

	r.Err = KError(kerr)

	if r.GenerationId, err = pd.getInt32(); err != nil {
		return
	}
@@ -10,11 +10,11 @@ func (r *LeaveGroupResponse) encode(pe packetEncoder) error {
}

func (r *LeaveGroupResponse) decode(pd packetDecoder, version int16) (err error) {
	if kerr, err := pd.getInt16(); err != nil {
	kerr, err := pd.getInt16()
	if err != nil {
		return err
	} else {
		r.Err = KError(kerr)
	}
	r.Err = KError(kerr)

	return nil
}
@@ -27,3 +27,43 @@ func (l *lengthField) check(curOffset int, buf []byte) error {

	return nil
}

type varintLengthField struct {
	startOffset int
	length      int64
}

func (l *varintLengthField) decode(pd packetDecoder) error {
	var err error
	l.length, err = pd.getVarint()
	return err
}

func (l *varintLengthField) saveOffset(in int) {
	l.startOffset = in
}

func (l *varintLengthField) adjustLength(currOffset int) int {
	oldFieldSize := l.reserveLength()
	l.length = int64(currOffset - l.startOffset - oldFieldSize)

	return l.reserveLength() - oldFieldSize
}

func (l *varintLengthField) reserveLength() int {
	var tmp [binary.MaxVarintLen64]byte
	return binary.PutVarint(tmp[:], l.length)
}

func (l *varintLengthField) run(curOffset int, buf []byte) error {
	binary.PutVarint(buf[l.startOffset:], l.length)
	return nil
}

func (l *varintLengthField) check(curOffset int, buf []byte) error {
	if int64(curOffset-l.startOffset-l.reserveLength()) != l.length {
		return PacketDecodingError{"length field invalid"}
	}

	return nil
}
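The v2 record format length-prefixes fields with zig-zag varints, whose encoded width depends on the value — hence `reserveLength` re-encoding into a scratch buffer and `adjustLength` returning a size delta. A stdlib-only sketch of that width behaviour (not part of the commit):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	var buf [binary.MaxVarintLen64]byte
	for _, v := range []int64{1, 63, 64, 300} {
		n := binary.PutVarint(buf[:], v) // zig-zag, then varint
		fmt.Printf("value %d encodes to %d byte(s): %x\n", v, n, buf[:n])
	}
}
```

(63 still fits in one byte after zig-zagging to 126; 64 zig-zags to 128 and needs two.)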
@@ -24,12 +24,13 @@ func (r *ListGroupsResponse) encode(pe packetEncoder) error {
}

func (r *ListGroupsResponse) decode(pd packetDecoder, version int16) error {
	if kerr, err := pd.getInt16(); err != nil {
	kerr, err := pd.getInt16()
	if err != nil {
		return err
	} else {
		r.Err = KError(kerr)
	}

	r.Err = KError(kerr)

	n, err := pd.getArrayLength()
	if err != nil {
		return err
18 vendor/src/gopkg.in/Shopify/sarama.v1/message.go vendored

@@ -37,7 +37,7 @@ type Message struct {
}

func (m *Message) encode(pe packetEncoder) error {
	pe.push(&crc32Field{})
	pe.push(newCRC32Field(crcIEEE))

	pe.putInt8(m.Version)

@@ -45,7 +45,9 @@ func (m *Message) encode(pe packetEncoder) error {
	pe.putInt8(attributes)

	if m.Version >= 1 {
		pe.putInt64(m.Timestamp.UnixNano() / int64(time.Millisecond))
		if err := (Timestamp{&m.Timestamp}).encode(pe); err != nil {
			return err
		}
	}

	err := pe.putBytes(m.Key)

@@ -104,7 +106,7 @@ func (m *Message) encode(pe packetEncoder) error {
}

func (m *Message) decode(pd packetDecoder) (err error) {
	err = pd.push(&crc32Field{})
	err = pd.push(newCRC32Field(crcIEEE))
	if err != nil {
		return err
	}

@@ -114,18 +116,20 @@ func (m *Message) decode(pd packetDecoder) (err error) {
		return err
	}

	if m.Version > 1 {
		return PacketDecodingError{fmt.Sprintf("unknown magic byte (%v)", m.Version)}
	}

	attribute, err := pd.getInt8()
	if err != nil {
		return err
	}
	m.Codec = CompressionCodec(attribute & compressionCodecMask)

	if m.Version >= 1 {
		millis, err := pd.getInt64()
		if err != nil {
	if m.Version == 1 {
		if err := (Timestamp{&m.Timestamp}).decode(pd); err != nil {
			return err
		}
		m.Timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
	}

	m.Key, err = pd.getBytes()
@@ -1,6 +1,8 @@
package sarama

import (
	"runtime"
	"strings"
	"testing"
	"time"
)

@@ -13,6 +15,21 @@ var (
		0xFF, 0xFF, 0xFF, 0xFF, // key
		0xFF, 0xFF, 0xFF, 0xFF} // value

	emptyV1Message = []byte{
		204, 47, 121, 217, // CRC
		0x01, // magic version byte
		0x00, // attribute flags
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // timestamp
		0xFF, 0xFF, 0xFF, 0xFF, // key
		0xFF, 0xFF, 0xFF, 0xFF} // value

	emptyV2Message = []byte{
		167, 236, 104, 3, // CRC
		0x02, // magic version byte
		0x00, // attribute flags
		0xFF, 0xFF, 0xFF, 0xFF, // key
		0xFF, 0xFF, 0xFF, 0xFF} // value

	emptyGzipMessage = []byte{
		97, 79, 149, 90, //CRC
		0x00, // magic version byte

@@ -24,6 +41,17 @@ var (
		0x08,
		0, 0, 9, 110, 136, 0, 255, 1, 0, 0, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}

	emptyGzipMessage18 = []byte{
		132, 99, 80, 148, //CRC
		0x00, // magic version byte
		0x01, // attribute flags
		0xFF, 0xFF, 0xFF, 0xFF, // key
		// value
		0x00, 0x00, 0x00, 0x17,
		0x1f, 0x8b,
		0x08,
		0, 0, 0, 0, 0, 0, 255, 1, 0, 0, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}

	emptyLZ4Message = []byte{
		132, 219, 238, 101, // CRC
		0x01, // version byte

@@ -79,7 +107,11 @@ func TestMessageEncoding(t *testing.T) {

	message.Value = []byte{}
	message.Codec = CompressionGZIP
	if strings.HasPrefix(runtime.Version(), "go1.8") || strings.HasPrefix(runtime.Version(), "go1.9") {
		testEncodable(t, "empty gzip", &message, emptyGzipMessage18)
	} else {
		testEncodable(t, "empty gzip", &message, emptyGzipMessage)
	}

	message.Value = []byte{}
	message.Codec = CompressionLZ4

@@ -163,3 +195,19 @@ func TestMessageDecodingBulkLZ4(t *testing.T) {
		t.Errorf("Decoding produced a set with %d messages, but 2 were expected.", len(message.Set.Messages))
	}
}

func TestMessageDecodingVersion1(t *testing.T) {
	message := Message{Version: 1}
	testDecodable(t, "decoding empty v1 message", &message, emptyV1Message)
}

func TestMessageDecodingUnknownVersions(t *testing.T) {
	message := Message{Version: 2}
	err := decode(emptyV2Message, &message)
	if err == nil {
		t.Error("Decoding did not produce an error for an unknown magic byte")
	}
	if err.Error() != "kafka: error decoding packet: unknown magic byte (2)" {
		t.Error("Decoding an unknown magic byte produced an unknown error ", err)
	}
}
@@ -122,6 +122,7 @@ func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoder {
type MockOffsetResponse struct {
	offsets map[string]map[int32]map[int64]int64
	t       TestReporter
	version int16
}

func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse {

@@ -131,6 +132,11 @@ func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse {
	}
}

func (mor *MockOffsetResponse) SetVersion(version int16) *MockOffsetResponse {
	mor.version = version
	return mor
}

func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, offset int64) *MockOffsetResponse {
	partitions := mor.offsets[topic]
	if partitions == nil {

@@ -148,7 +154,7 @@ func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, of

func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoder {
	offsetRequest := reqBody.(*OffsetRequest)
	offsetResponse := &OffsetResponse{}
	offsetResponse := &OffsetResponse{Version: mor.version}
	for topic, partitions := range offsetRequest.blocks {
		for partition, block := range partitions {
			offset := mor.getOffset(topic, partition, block.time)

@@ -180,6 +186,7 @@ type MockFetchResponse struct {
	highWaterMarks map[string]map[int32]int64
	t              TestReporter
	batchSize      int
	version        int16
}

func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse {

@@ -191,6 +198,11 @@ func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse {
	}
}

func (mfr *MockFetchResponse) SetVersion(version int16) *MockFetchResponse {
	mfr.version = version
	return mfr
}

func (mfr *MockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *MockFetchResponse {
	partitions := mfr.messages[topic]
	if partitions == nil {

@@ -218,7 +230,9 @@ func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, of

func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoder {
	fetchRequest := reqBody.(*FetchRequest)
	res := &FetchResponse{}
	res := &FetchResponse{
		Version: mfr.version,
	}
	for topic, partitions := range fetchRequest.blocks {
		for partition, block := range partitions {
			initialOffset := block.fetchOffset

@@ -394,7 +408,7 @@ func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KE
func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoder {
	req := reqBody.(*ProduceRequest)
	res := &ProduceResponse{}
	for topic, partitions := range req.msgSets {
	for topic, partitions := range req.records {
		for partition := range partitions {
			res.AddTopicPartition(topic, partition, mr.getError(topic, partition))
		}
@@ -147,7 +147,7 @@ func (mp *AsyncProducer) ExpectInputWithCheckerFunctionAndSucceed(cf ValueChecke
	mp.expectations = append(mp.expectations, &producerExpectation{Result: errProduceSuccess, CheckFunction: cf})
}

// ExpectInputWithCheckerFunctionAndSucceed sets an expectation on the mock producer that a message
// ExpectInputWithCheckerFunctionAndFail sets an expectation on the mock producer that a message
// will be provided on the input channel. The mock producer will first call the given function to
// check the message value. If an error is returned it will be made available on the Errors channel
// otherwise the mock will handle the message as if it failed to produce successfully. This means
@@ -20,7 +20,7 @@ type Consumer struct {

// NewConsumer returns a new mock Consumer instance. The t argument should
// be the *testing.T instance of your test method. An error will be written to it if
// an expectation is violated. The config argument is currently unused and can be set to nil.
// an expectation is violated. The config argument can be set to nil.
func NewConsumer(t ErrorReporter, config *sarama.Config) *Consumer {
	if config == nil {
		config = sarama.NewConfig()

@@ -178,6 +178,7 @@ func (c *Consumer) ExpectConsumePartition(topic string, partition int32, offset
// Errors and Messages channel, you should specify what values will be provided on these
// channels using YieldMessage and YieldError.
type PartitionConsumer struct {
	highWaterMarkOffset     int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG
	l                       sync.Mutex
	t                       ErrorReporter
	topic                   string

@@ -189,7 +190,6 @@ type PartitionConsumer struct {
	consumed                bool
	errorsShouldBeDrained   bool
	messagesShouldBeDrained bool
	highWaterMarkOffset     int64
}

///////////////////////////////////////////////////

@@ -244,7 +244,7 @@ func (pc *PartitionConsumer) Close() error {
	wg.Add(1)
	go func() {
		defer wg.Done()
		for _ = range pc.messages {
		for range pc.messages {
			// drain
		}
	}()
@@ -197,8 +197,8 @@ func TestConsumerTopicMetadata(t *testing.T) {
	consumer := NewConsumer(trm, nil)

	consumer.SetTopicMetadata(map[string][]int32{
		"test1": []int32{0, 1, 2, 3},
		"test2": []int32{0, 1, 2, 3, 4, 5, 6, 7},
		"test1": {0, 1, 2, 3},
		"test2": {0, 1, 2, 3, 4, 5, 6, 7},
	})

	topics, err := consumer.Topics()
@@ -46,29 +46,28 @@ func (sp *SyncProducer) SendMessage(msg *sarama.ProducerMessage) (partition int3
	expectation := sp.expectations[0]
	sp.expectations = sp.expectations[1:]
	if expectation.CheckFunction != nil {
		if val, err := msg.Value.Encode(); err != nil {
		val, err := msg.Value.Encode()
		if err != nil {
			sp.t.Errorf("Input message encoding failed: %s", err.Error())
			return -1, -1, err
		} else {
			err := expectation.CheckFunction(val)
			if err != nil {
				sp.t.Errorf("Check function returned an error: %s", err.Error())
				return -1, -1, err
			}

		errCheck := expectation.CheckFunction(val)
		if errCheck != nil {
			sp.t.Errorf("Check function returned an error: %s", errCheck.Error())
			return -1, -1, errCheck
		}
	}
	if expectation.Result == errProduceSuccess {
		sp.lastOffset++
		msg.Offset = sp.lastOffset
		return 0, msg.Offset, nil
	} else {
	}
		return -1, -1, expectation.Result
	}
	} else {
	sp.t.Errorf("No more expectation set on this mock producer to handle the input message.")
	return -1, -1, errOutOfExpectations
	}
}

// SendMessages corresponds with the SendMessages method of sarama's SyncProducer implementation.
// You have to set expectations on the mock producer before calling SendMessages, so it knows

@@ -89,11 +88,10 @@ func (sp *SyncProducer) SendMessages(msgs []*sarama.ProducerMessage) error {

	}
	return nil
	} else {
	}
	sp.t.Errorf("Insufficient expectations set on this mock producer to handle the input messages.")
	return errOutOfExpectations
	}
}

// Close corresponds with the Close method of sarama's SyncProducer implementation.
// By closing a mock syncproducer, you also tell it that no more SendMessage calls will follow,

@@ -123,7 +121,7 @@ func (sp *SyncProducer) ExpectSendMessageWithCheckerFunctionAndSucceed(cf ValueC
	sp.expectations = append(sp.expectations, &producerExpectation{Result: errProduceSuccess, CheckFunction: cf})
}

// ExpectSendMessageAndFail sets an expectation on the mock producer that SendMessage will be
// ExpectSendMessageWithCheckerFunctionAndFail sets an expectation on the mock producer that SendMessage will be
// called. The mock producer will first call the given function to check the message value.
// It will cascade the error of the function, if any, or handle the message as if it failed
// to produce successfully, i.e. by returning the provided error.
@@ -151,6 +151,13 @@ type PartitionOffsetManager interface {
	// message twice, and your processing should ideally be idempotent.
	MarkOffset(offset int64, metadata string)

	// ResetOffset resets to the provided offset, alongside a metadata string that
	// represents the state of the partition consumer at that point in time. Reset
	// acts as a counterpart to MarkOffset, the difference being that it allows
	// resetting an offset to an earlier or smaller value, where MarkOffset only
	// allows incrementing the offset. cf MarkOffset for more details.
	ResetOffset(offset int64, metadata string)

	// Errors returns a read channel of errors that occur during offset management, if
	// enabled. By default, errors are logged and not returned over this channel. If
	// you want to implement any custom error handling, set your config's

@@ -329,6 +336,17 @@ func (pom *partitionOffsetManager) MarkOffset(offset int64, metadata string) {
	}
}

func (pom *partitionOffsetManager) ResetOffset(offset int64, metadata string) {
	pom.lock.Lock()
	defer pom.lock.Unlock()

	if offset <= pom.offset {
		pom.offset = offset
		pom.metadata = metadata
		pom.dirty = true
	}
}

func (pom *partitionOffsetManager) updateCommitted(offset int64, metadata string) {
	pom.lock.Lock()
	defer pom.lock.Unlock()
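A hedged usage sketch of the new method, mirroring the tests that follow; `pom` is assumed to be a PartitionOffsetManager obtained from an OffsetManager:

```go
pom.MarkOffset(6, "processed through 5") // MarkOffset only moves forward
pom.ResetOffset(1, "rewind for replay")  // ResetOffset may move backward
offset, meta := pom.NextOffset()         // now 1, "rewind for replay"
_, _ = offset, meta
```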
@@ -204,6 +204,70 @@ func TestPartitionOffsetManagerNextOffset(t *testing.T) {
	safeClose(t, testClient)
}

func TestPartitionOffsetManagerResetOffset(t *testing.T) {
	om, testClient, broker, coordinator := initOffsetManager(t)
	pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta")

	ocResponse := new(OffsetCommitResponse)
	ocResponse.AddError("my_topic", 0, ErrNoError)
	coordinator.Returns(ocResponse)

	expected := int64(1)
	pom.ResetOffset(expected, "modified_meta")
	actual, meta := pom.NextOffset()

	if actual != expected {
		t.Errorf("Expected offset %v. Actual: %v", expected, actual)
	}
	if meta != "modified_meta" {
		t.Errorf("Expected metadata \"modified_meta\". Actual: %q", meta)
	}

	safeClose(t, pom)
	safeClose(t, om)
	safeClose(t, testClient)
	broker.Close()
	coordinator.Close()
}

func TestPartitionOffsetManagerResetOffsetWithRetention(t *testing.T) {
	om, testClient, broker, coordinator := initOffsetManager(t)
	testClient.Config().Consumer.Offsets.Retention = time.Hour

	pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta")

	ocResponse := new(OffsetCommitResponse)
	ocResponse.AddError("my_topic", 0, ErrNoError)
	handler := func(req *request) (res encoder) {
		if req.body.version() != 2 {
			t.Errorf("Expected to be using version 2. Actual: %v", req.body.version())
		}
		offsetCommitRequest := req.body.(*OffsetCommitRequest)
		if offsetCommitRequest.RetentionTime != (60 * 60 * 1000) {
			t.Errorf("Expected an hour retention time. Actual: %v", offsetCommitRequest.RetentionTime)
		}
		return ocResponse
	}
	coordinator.setHandler(handler)

	expected := int64(1)
	pom.ResetOffset(expected, "modified_meta")
	actual, meta := pom.NextOffset()

	if actual != expected {
		t.Errorf("Expected offset %v. Actual: %v", expected, actual)
	}
	if meta != "modified_meta" {
		t.Errorf("Expected metadata \"modified_meta\". Actual: %q", meta)
	}

	safeClose(t, pom)
	safeClose(t, om)
	safeClose(t, testClient)
	broker.Close()
	coordinator.Close()
}

func TestPartitionOffsetManagerMarkOffset(t *testing.T) {
	om, testClient, broker, coordinator := initOffsetManager(t)
	pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta")
@@ -9,11 +9,16 @@ type packetDecoder interface {
	getInt16() (int16, error)
	getInt32() (int32, error)
	getInt64() (int64, error)
	getVarint() (int64, error)
	getArrayLength() (int, error)
	getBool() (bool, error)

	// Collections
	getBytes() ([]byte, error)
	getVarintBytes() ([]byte, error)
	getRawBytes(length int) ([]byte, error)
	getString() (string, error)
	getNullableString() (*string, error)
	getInt32Array() ([]int32, error)
	getInt64Array() ([]int64, error)
	getStringArray() ([]string, error)
@@ -21,6 +26,7 @@ type packetDecoder interface {
	// Subsets
	remaining() int
	getSubset(length int) (packetDecoder, error)
	peek(offset, length int) (packetDecoder, error) // similar to getSubset, but it doesn't advance the offset

	// Stacks, see PushDecoder
	push(in pushDecoder) error
@@ -43,3 +49,12 @@ type pushDecoder interface {
	// of data from the saved offset, and verify it based on the data between the saved offset and curOffset.
	check(curOffset int, buf []byte) error
}

// dynamicPushDecoder extends the interface of pushDecoder for uses cases where the length of the
// fields itself is unknown until its value was decoded (for instance varint encoded length
// fields).
// During push, dynamicPushDecoder.decode() method will be called instead of reserveLength()
type dynamicPushDecoder interface {
	pushDecoder
	decoder
}
@@ -11,12 +11,16 @@ type packetEncoder interface {
	putInt16(in int16)
	putInt32(in int32)
	putInt64(in int64)
	putVarint(in int64)
	putArrayLength(in int) error
	putBool(in bool)

	// Collections
	putBytes(in []byte) error
	putVarintBytes(in []byte) error
	putRawBytes(in []byte) error
	putString(in string) error
	putNullableString(in *string) error
	putStringArray(in []string) error
	putInt32Array(in []int32) error
	putInt64Array(in []int64) error
@@ -48,3 +52,14 @@ type pushEncoder interface {
	// of data to the saved offset, based on the data between the saved offset and curOffset.
	run(curOffset int, buf []byte) error
}

// dynamicPushEncoder extends the interface of pushEncoder for uses cases where the length of the
// fields itself is unknown until its value was computed (for instance varint encoded length
// fields).
type dynamicPushEncoder interface {
	pushEncoder

	// Called during pop() to adjust the length of the field.
	// It should return the difference in bytes between the last computed length and current length.
	adjustLength(currOffset int) int
}
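The dynamicPushEncoder/dynamicPushDecoder pair exists because a varint-encoded length field has no fixed byte width: how many bytes the length prefix occupies is only known once the length itself is known, which is why adjustLength must be able to shift everything after the fact. A standalone sketch (plain Go, not sarama code) showing how the width of a varint grows with its value:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	buf := make([]byte, binary.MaxVarintLen64)
	for _, length := range []int64{5, 200, 70000} {
		// PutVarint uses zigzag encoding; the byte count depends on the value:
		// 5 fits in 1 byte, 200 needs 2, 70000 needs 3.
		n := binary.PutVarint(buf, length)
		fmt.Printf("length %6d -> %d varint byte(s): %x\n", length, n, buf[:n])
	}
}
```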
@@ -87,6 +87,18 @@ type hashPartitioner struct {
	hasher hash.Hash32
}

// NewCustomHashPartitioner is a wrapper around NewHashPartitioner, allowing the use of custom hasher.
// The argument is a function providing the instance, implementing the hash.Hash32 interface. This is to ensure that
// each partition dispatcher gets its own hasher, to avoid concurrency issues by sharing an instance.
func NewCustomHashPartitioner(hasher func() hash.Hash32) PartitionerConstructor {
	return func(topic string) Partitioner {
		p := new(hashPartitioner)
		p.random = NewRandomPartitioner(topic)
		p.hasher = hasher()
		return p
	}
}

// NewHashPartitioner returns a Partitioner which behaves as follows. If the message's key is nil then a
// random partition is chosen. Otherwise the FNV-1a hash of the encoded bytes of the message key is used,
// modulus the number of partitions. This ensures that messages with the same key always end up on the
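As a hedged usage sketch of the new constructor (broker address and topic are placeholders), a producer can be switched from the default FNV-1a hash to any other hash.Hash32, for example CRC-32:

```go
package main

import (
	"hash"
	"hash/crc32"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()
	// Each partition dispatcher calls the factory, so every dispatcher
	// gets a private hasher and no instance is shared across goroutines.
	config.Producer.Partitioner = sarama.NewCustomHashPartitioner(func() hash.Hash32 {
		return crc32.NewIEEE()
	})
	config.Producer.Return.Successes = true // required by the sync producer

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "my_topic",
		Key:   sarama.StringEncoder("user-123"), // same key -> same partition
		Value: sarama.StringEncoder("payload"),
	})
	log.Println(partition, offset, err)
}
```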
@@ -2,6 +2,7 @@ package sarama

import (
	"crypto/rand"
	"hash/fnv"
	"log"
	"testing"
)
@@ -70,6 +71,55 @@ func TestRoundRobinPartitioner(t *testing.T) {
	}
}

func TestNewHashPartitionerWithHasher(t *testing.T) {
	// use the current default hasher fnv.New32a()
	partitioner := NewCustomHashPartitioner(fnv.New32a)("mytopic")

	choice, err := partitioner.Partition(&ProducerMessage{}, 1)
	if err != nil {
		t.Error(partitioner, err)
	}
	if choice != 0 {
		t.Error("Returned non-zero partition when only one available.")
	}

	for i := 1; i < 50; i++ {
		choice, err := partitioner.Partition(&ProducerMessage{}, 50)
		if err != nil {
			t.Error(partitioner, err)
		}
		if choice < 0 || choice >= 50 {
			t.Error("Returned partition", choice, "outside of range for nil key.")
		}
	}

	buf := make([]byte, 256)
	for i := 1; i < 50; i++ {
		if _, err := rand.Read(buf); err != nil {
			t.Error(err)
		}
		assertPartitioningConsistent(t, partitioner, &ProducerMessage{Key: ByteEncoder(buf)}, 50)
	}
}

func TestHashPartitionerWithHasherMinInt32(t *testing.T) {
	// use the current default hasher fnv.New32a()
	partitioner := NewCustomHashPartitioner(fnv.New32a)("mytopic")

	msg := ProducerMessage{}
	// "1468509572224" generates 2147483648 (uint32) result from Sum32 function
	// which is -2147483648 or int32's min value
	msg.Key = StringEncoder("1468509572224")

	choice, err := partitioner.Partition(&msg, 50)
	if err != nil {
		t.Error(partitioner, err)
	}
	if choice < 0 || choice >= 50 {
		t.Error("Returned partition", choice, "outside of range for nil key.")
	}
}

func TestHashPartitioner(t *testing.T) {
	partitioner := NewHashPartitioner("mytopic")
@@ -1,6 +1,7 @@
package sarama

import (
	"encoding/binary"
	"fmt"
	"math"
@@ -8,6 +9,7 @@ import (
)

type prepEncoder struct {
	stack  []pushEncoder
	length int
}
@@ -29,6 +31,11 @@ func (pe *prepEncoder) putInt64(in int64) {
	pe.length += 8
}

func (pe *prepEncoder) putVarint(in int64) {
	var buf [binary.MaxVarintLen64]byte
	pe.length += binary.PutVarint(buf[:], in)
}

func (pe *prepEncoder) putArrayLength(in int) error {
	if in > math.MaxInt32 {
		return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)}
@@ -37,6 +44,10 @@ func (pe *prepEncoder) putArrayLength(in int) error {
	return nil
}

func (pe *prepEncoder) putBool(in bool) {
	pe.length++
}

// arrays

func (pe *prepEncoder) putBytes(in []byte) error {
@@ -44,12 +55,17 @@ func (pe *prepEncoder) putBytes(in []byte) error {
	if in == nil {
		return nil
	}
	if len(in) > math.MaxInt32 {
		return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))}
	return pe.putRawBytes(in)
	}
	pe.length += len(in)

func (pe *prepEncoder) putVarintBytes(in []byte) error {
	if in == nil {
		pe.putVarint(-1)
		return nil
	}
	pe.putVarint(int64(len(in)))
	return pe.putRawBytes(in)
}

func (pe *prepEncoder) putRawBytes(in []byte) error {
	if len(in) > math.MaxInt32 {
@@ -59,6 +75,14 @@ func (pe *prepEncoder) putRawBytes(in []byte) error {
	return nil
}

func (pe *prepEncoder) putNullableString(in *string) error {
	if in == nil {
		pe.length += 2
		return nil
	}
	return pe.putString(*in)
}

func (pe *prepEncoder) putString(in string) error {
	pe.length += 2
	if len(in) > math.MaxInt16 {
@@ -108,10 +132,18 @@ func (pe *prepEncoder) offset() int {
// stackable

func (pe *prepEncoder) push(in pushEncoder) {
	in.saveOffset(pe.length)
	pe.length += in.reserveLength()
	pe.stack = append(pe.stack, in)
}

func (pe *prepEncoder) pop() error {
	in := pe.stack[len(pe.stack)-1]
	pe.stack = pe.stack[:len(pe.stack)-1]
	if dpe, ok := in.(dynamicPushEncoder); ok {
		pe.length += dpe.adjustLength(pe.length)
	}

	return nil
}
@@ -21,55 +21,16 @@ const (
)

type ProduceRequest struct {
	TransactionalID *string
	RequiredAcks    RequiredAcks
	Timeout         int32
	Version int16 // v1 requires Kafka 0.9, v2 requires Kafka 0.10
	msgSets map[string]map[int32]*MessageSet
	Version int16 // v1 requires Kafka 0.9, v2 requires Kafka 0.10, v3 requires Kafka 0.11
	records map[string]map[int32]Records
}

func (r *ProduceRequest) encode(pe packetEncoder) error {
	pe.putInt16(int16(r.RequiredAcks))
	pe.putInt32(r.Timeout)
	err := pe.putArrayLength(len(r.msgSets))
	if err != nil {
		return err
	}
	metricRegistry := pe.metricRegistry()
	var batchSizeMetric metrics.Histogram
	var compressionRatioMetric metrics.Histogram
	if metricRegistry != nil {
		batchSizeMetric = getOrRegisterHistogram("batch-size", metricRegistry)
		compressionRatioMetric = getOrRegisterHistogram("compression-ratio", metricRegistry)
	}

	totalRecordCount := int64(0)
	for topic, partitions := range r.msgSets {
		err = pe.putString(topic)
		if err != nil {
			return err
		}
		err = pe.putArrayLength(len(partitions))
		if err != nil {
			return err
		}
		topicRecordCount := int64(0)
		var topicCompressionRatioMetric metrics.Histogram
		if metricRegistry != nil {
			topicCompressionRatioMetric = getOrRegisterTopicHistogram("compression-ratio", topic, metricRegistry)
		}
		for id, msgSet := range partitions {
			startOffset := pe.offset()
			pe.putInt32(id)
			pe.push(&lengthField{})
			err = msgSet.encode(pe)
			if err != nil {
				return err
			}
			err = pe.pop()
			if err != nil {
				return err
			}
			if metricRegistry != nil {
func updateMsgSetMetrics(msgSet *MessageSet, compressionRatioMetric metrics.Histogram,
	topicCompressionRatioMetric metrics.Histogram) int64 {
	var topicRecordCount int64
	for _, messageBlock := range msgSet.Messages {
		// Is this a fake "message" wrapping real messages?
		if messageBlock.Msg.Set != nil {
@@ -88,6 +49,74 @@ func (r *ProduceRequest) encode(pe packetEncoder) error {
			topicCompressionRatioMetric.Update(intCompressionRatio)
		}
	}
	return topicRecordCount
}

func updateBatchMetrics(recordBatch *RecordBatch, compressionRatioMetric metrics.Histogram,
	topicCompressionRatioMetric metrics.Histogram) int64 {
	if recordBatch.compressedRecords != nil {
		compressionRatio := int64(float64(recordBatch.recordsLen) / float64(len(recordBatch.compressedRecords)) * 100)
		compressionRatioMetric.Update(compressionRatio)
		topicCompressionRatioMetric.Update(compressionRatio)
	}

	return int64(len(recordBatch.Records))
}

func (r *ProduceRequest) encode(pe packetEncoder) error {
	if r.Version >= 3 {
		if err := pe.putNullableString(r.TransactionalID); err != nil {
			return err
		}
	}
	pe.putInt16(int16(r.RequiredAcks))
	pe.putInt32(r.Timeout)
	metricRegistry := pe.metricRegistry()
	var batchSizeMetric metrics.Histogram
	var compressionRatioMetric metrics.Histogram
	if metricRegistry != nil {
		batchSizeMetric = getOrRegisterHistogram("batch-size", metricRegistry)
		compressionRatioMetric = getOrRegisterHistogram("compression-ratio", metricRegistry)
	}
	totalRecordCount := int64(0)

	err := pe.putArrayLength(len(r.records))
	if err != nil {
		return err
	}

	for topic, partitions := range r.records {
		err = pe.putString(topic)
		if err != nil {
			return err
		}
		err = pe.putArrayLength(len(partitions))
		if err != nil {
			return err
		}
		topicRecordCount := int64(0)
		var topicCompressionRatioMetric metrics.Histogram
		if metricRegistry != nil {
			topicCompressionRatioMetric = getOrRegisterTopicHistogram("compression-ratio", topic, metricRegistry)
		}
		for id, records := range partitions {
			startOffset := pe.offset()
			pe.putInt32(id)
			pe.push(&lengthField{})
			err = records.encode(pe)
			if err != nil {
				return err
			}
			err = pe.pop()
			if err != nil {
				return err
			}
			if metricRegistry != nil {
				if r.Version >= 3 {
					topicRecordCount += updateBatchMetrics(records.recordBatch, compressionRatioMetric, topicCompressionRatioMetric)
				} else {
					topicRecordCount += updateMsgSetMetrics(records.msgSet, compressionRatioMetric, topicCompressionRatioMetric)
				}
				batchSize := int64(pe.offset() - startOffset)
				batchSizeMetric.Update(batchSize)
				getOrRegisterTopicHistogram("batch-size", topic, metricRegistry).Update(batchSize)
@@ -108,6 +137,15 @@ func (r *ProduceRequest) encode(pe packetEncoder) error {
}

func (r *ProduceRequest) decode(pd packetDecoder, version int16) error {
	r.Version = version

	if version >= 3 {
		id, err := pd.getNullableString()
		if err != nil {
			return err
		}
		r.TransactionalID = id
	}
	requiredAcks, err := pd.getInt16()
	if err != nil {
		return err
@@ -123,7 +161,8 @@ func (r *ProduceRequest) decode(pd packetDecoder, version int16) error {
	if topicCount == 0 {
		return nil
	}
	r.msgSets = make(map[string]map[int32]*MessageSet)

	r.records = make(map[string]map[int32]Records)
	for i := 0; i < topicCount; i++ {
		topic, err := pd.getString()
		if err != nil {
@@ -133,28 +172,29 @@ func (r *ProduceRequest) decode(pd packetDecoder, version int16) error {
		if err != nil {
			return err
		}
		r.msgSets[topic] = make(map[int32]*MessageSet)
		r.records[topic] = make(map[int32]Records)

		for j := 0; j < partitionCount; j++ {
			partition, err := pd.getInt32()
			if err != nil {
				return err
			}
			messageSetSize, err := pd.getInt32()
			size, err := pd.getInt32()
			if err != nil {
				return err
			}
			msgSetDecoder, err := pd.getSubset(int(messageSetSize))
			recordsDecoder, err := pd.getSubset(int(size))
			if err != nil {
				return err
			}
			msgSet := &MessageSet{}
			err = msgSet.decode(msgSetDecoder)
			if err != nil {
			var records Records
			if err := records.decode(recordsDecoder); err != nil {
				return err
			}
			r.msgSets[topic][partition] = msgSet
			r.records[topic][partition] = records
		}
	}

	return nil
}

@@ -172,38 +212,41 @@ func (r *ProduceRequest) requiredVersion() KafkaVersion {
		return V0_9_0_0
	case 2:
		return V0_10_0_0
	case 3:
		return V0_11_0_0
	default:
		return minVersion
	}
}

func (r *ProduceRequest) ensureRecords(topic string, partition int32) {
	if r.records == nil {
		r.records = make(map[string]map[int32]Records)
	}

	if r.records[topic] == nil {
		r.records[topic] = make(map[int32]Records)
	}
}

func (r *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) {
	if r.msgSets == nil {
		r.msgSets = make(map[string]map[int32]*MessageSet)
	}

	if r.msgSets[topic] == nil {
		r.msgSets[topic] = make(map[int32]*MessageSet)
	}

	set := r.msgSets[topic][partition]
	r.ensureRecords(topic, partition)
	set := r.records[topic][partition].msgSet

	if set == nil {
		set = new(MessageSet)
		r.msgSets[topic][partition] = set
		r.records[topic][partition] = newLegacyRecords(set)
	}

	set.addMessage(msg)
}

func (r *ProduceRequest) AddSet(topic string, partition int32, set *MessageSet) {
	if r.msgSets == nil {
		r.msgSets = make(map[string]map[int32]*MessageSet)
	r.ensureRecords(topic, partition)
	r.records[topic][partition] = newLegacyRecords(set)
}

	if r.msgSets[topic] == nil {
		r.msgSets[topic] = make(map[int32]*MessageSet)
	}

	r.msgSets[topic][partition] = set
func (r *ProduceRequest) AddBatch(topic string, partition int32, batch *RecordBatch) {
	r.ensureRecords(topic, partition)
	r.records[topic][partition] = newDefaultRecords(batch)
}
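The request now stores a per-partition Records union instead of raw message sets: AddMessage and AddSet feed the legacy message-set path, while AddBatch feeds the new v3 record-batch path. A minimal package-internal sketch of building a v3 request by hand, mirroring what produceSet.buildRequest does (topic, partition, and payload are invented for the example):

```go
package sarama

import "time"

// buildExampleV3Request is a sketch, not library code: version 3 plus
// a RecordBatch is the combination the new encode path expects.
func buildExampleV3Request() *ProduceRequest {
	req := &ProduceRequest{
		RequiredAcks: WaitForAll,
		Timeout:      10000, // milliseconds
		Version:      3,     // record batches require v3 / Kafka 0.11
	}
	batch := &RecordBatch{
		Version:        2,
		FirstTimestamp: time.Now(),
		ProducerID:     -1, // no idempotent producer yet
	}
	batch.addRecord(&Record{
		Key:   []byte("k"),
		Value: []byte("v"),
	})
	req.AddBatch("my_topic", 0, batch)
	return req
}
```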
@@ -2,6 +2,7 @@ package sarama

import (
	"testing"
	"time"
)

var (
@@ -32,6 +33,41 @@ var (
		0x00,
		0xFF, 0xFF, 0xFF, 0xFF,
		0x00, 0x00, 0x00, 0x02, 0x00, 0xEE}

	produceRequestOneRecord = []byte{
		0xFF, 0xFF, // Transaction ID
		0x01, 0x23, // Required Acks
		0x00, 0x00, 0x04, 0x44, // Timeout
		0x00, 0x00, 0x00, 0x01, // Number of Topics
		0x00, 0x05, 't', 'o', 'p', 'i', 'c', // Topic
		0x00, 0x00, 0x00, 0x01, // Number of Partitions
		0x00, 0x00, 0x00, 0xAD, // Partition
		0x00, 0x00, 0x00, 0x52, // Records length
		// recordBatch
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x46,
		0x00, 0x00, 0x00, 0x00,
		0x02,
		0x54, 0x79, 0x61, 0xFD,
		0x00, 0x00,
		0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x01, 0x58, 0x8D, 0xCD, 0x59, 0x38,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00,
		0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x01,
		// record
		0x28,
		0x00,
		0x0A,
		0x00,
		0x08, 0x01, 0x02, 0x03, 0x04,
		0x06, 0x05, 0x06, 0x07,
		0x02,
		0x06, 0x08, 0x09, 0x0A,
		0x04, 0x0B, 0x0C,
	}
)

func TestProduceRequest(t *testing.T) {
@@ -44,4 +80,24 @@ func TestProduceRequest(t *testing.T) {

	request.AddMessage("topic", 0xAD, &Message{Codec: CompressionNone, Key: nil, Value: []byte{0x00, 0xEE}})
	testRequest(t, "one message", request, produceRequestOneMessage)

	request.Version = 3
	batch := &RecordBatch{
		Version:        2,
		FirstTimestamp: time.Unix(1479847795, 0),
		MaxTimestamp:   time.Unix(0, 0),
		Records: []*Record{{
			TimestampDelta: 5 * time.Millisecond,
			Key:            []byte{0x01, 0x02, 0x03, 0x04},
			Value:          []byte{0x05, 0x06, 0x07},
			Headers: []*RecordHeader{{
				Key:   []byte{0x08, 0x09, 0x0A},
				Value: []byte{0x0B, 0x0C},
			}},
		}},
	}
	request.AddBatch("topic", 0xAD, batch)
	packet := testRequestEncode(t, "one record", request, produceRequestOneRecord)
	batch.Records[0].length.startOffset = 0
	testRequestDecode(t, "one record", request, packet)
}
@@ -1,6 +1,9 @@
package sarama

import "time"
import (
	"fmt"
	"time"
)

type ProduceResponseBlock struct {
	Err KError
@@ -32,6 +35,23 @@ func (b *ProduceResponseBlock) decode(pd packetDecoder, version int16) (err erro
	return nil
}

func (b *ProduceResponseBlock) encode(pe packetEncoder, version int16) (err error) {
	pe.putInt16(int16(b.Err))
	pe.putInt64(b.Offset)

	if version >= 2 {
		timestamp := int64(-1)
		if !b.Timestamp.Before(time.Unix(0, 0)) {
			timestamp = b.Timestamp.UnixNano() / int64(time.Millisecond)
		} else if !b.Timestamp.IsZero() {
			return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", b.Timestamp)}
		}
		pe.putInt64(timestamp)
	}

	return nil
}

type ProduceResponse struct {
	Blocks  map[string]map[int32]*ProduceResponseBlock
	Version int16
@@ -76,11 +96,12 @@ func (r *ProduceResponse) decode(pd packetDecoder, version int16) (err error) {
	}

	if r.Version >= 1 {
		if millis, err := pd.getInt32(); err != nil {
		millis, err := pd.getInt32()
		if err != nil {
			return err
		} else {
			r.ThrottleTime = time.Duration(millis) * time.Millisecond
		}

		r.ThrottleTime = time.Duration(millis) * time.Millisecond
	}

	return nil
@@ -102,8 +123,10 @@ func (r *ProduceResponse) encode(pe packetEncoder) error {
		}
		for id, prb := range partitions {
			pe.putInt32(id)
			pe.putInt16(int16(prb.Err))
			pe.putInt64(prb.Offset)
			err = prb.encode(pe, r.Version)
			if err != nil {
				return err
			}
		}
	}
	if r.Version >= 1 {
@@ -126,6 +149,8 @@ func (r *ProduceResponse) requiredVersion() KafkaVersion {
		return V0_9_0_0
	case 2:
		return V0_10_0_0
	case 3:
		return V0_11_0_0
	default:
		return minVersion
	}
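In the v2 block encoding above, the timestamp travels as milliseconds since the Unix epoch, with -1 as the "no timestamp" sentinel; a non-zero time before the epoch is rejected. A small sketch of that conversion in isolation (the helper name is invented here):

```go
package main

import (
	"fmt"
	"time"
)

// blockTimestamp mirrors the conversion in ProduceResponseBlock.encode:
// -1 for an unset zero value, epoch milliseconds otherwise, and an
// error for any other pre-epoch time.
func blockTimestamp(t time.Time) (int64, error) {
	if !t.Before(time.Unix(0, 0)) {
		return t.UnixNano() / int64(time.Millisecond), nil
	}
	if t.IsZero() {
		return -1, nil
	}
	return 0, fmt.Errorf("invalid timestamp (%v)", t)
}

func main() {
	ms, _ := blockTimestamp(time.Unix(1, 0))
	fmt.Println(ms) // 1000, the 0x03E8 bytes seen in the fixtures below
}
```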
@@ -1,67 +1,128 @@
package sarama

import "testing"

var (
	produceResponseNoBlocks = []byte{
		0x00, 0x00, 0x00, 0x00}

	produceResponseManyBlocks = []byte{
		0x00, 0x00, 0x00, 0x02,

		0x00, 0x03, 'f', 'o', 'o',
		0x00, 0x00, 0x00, 0x00,

		0x00, 0x03, 'b', 'a', 'r',
		0x00, 0x00, 0x00, 0x02,

		0x00, 0x00, 0x00, 0x01,
		0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF,

		0x00, 0x00, 0x00, 0x02,
		0x00, 0x02,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
import (
	"fmt"
	"testing"
	"time"
)

func TestProduceResponse(t *testing.T) {
var (
	produceResponseNoBlocksV0 = []byte{
		0x00, 0x00, 0x00, 0x00}

	produceResponseManyBlocksVersions = [][]byte{
		{
			0x00, 0x00, 0x00, 0x01,

			0x00, 0x03, 'f', 'o', 'o',
			0x00, 0x00, 0x00, 0x01,

			0x00, 0x00, 0x00, 0x01, // Partition 1
			0x00, 0x02, // ErrInvalidMessage
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, // Offset 255
		}, {
			0x00, 0x00, 0x00, 0x01,

			0x00, 0x03, 'f', 'o', 'o',
			0x00, 0x00, 0x00, 0x01,

			0x00, 0x00, 0x00, 0x01, // Partition 1
			0x00, 0x02, // ErrInvalidMessage
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, // Offset 255

			0x00, 0x00, 0x00, 0x64, // 100 ms throttle time
		}, {
			0x00, 0x00, 0x00, 0x01,

			0x00, 0x03, 'f', 'o', 'o',
			0x00, 0x00, 0x00, 0x01,

			0x00, 0x00, 0x00, 0x01, // Partition 1
			0x00, 0x02, // ErrInvalidMessage
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, // Offset 255
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xE8, // Timestamp January 1st 0001 at 00:00:01,000 UTC (LogAppendTime was used)

			0x00, 0x00, 0x00, 0x64, // 100 ms throttle time
		},
	}
)

func TestProduceResponseDecode(t *testing.T) {
	response := ProduceResponse{}

	testVersionDecodable(t, "no blocks", &response, produceResponseNoBlocks, 0)
	testVersionDecodable(t, "no blocks", &response, produceResponseNoBlocksV0, 0)
	if len(response.Blocks) != 0 {
		t.Error("Decoding produced", len(response.Blocks), "topics where there were none")
	}

	testVersionDecodable(t, "many blocks", &response, produceResponseManyBlocks, 0)
	if len(response.Blocks) != 2 {
		t.Error("Decoding produced", len(response.Blocks), "topics where there were 2")
	for v, produceResponseManyBlocks := range produceResponseManyBlocksVersions {
		t.Logf("Decoding produceResponseManyBlocks version %d", v)
		testVersionDecodable(t, "many blocks", &response, produceResponseManyBlocks, int16(v))
		if len(response.Blocks) != 1 {
			t.Error("Decoding produced", len(response.Blocks), "topics where there was 1")
		}
		if len(response.Blocks["foo"]) != 0 {
			t.Error("Decoding produced", len(response.Blocks["foo"]), "partitions for 'foo' where there were none")
		if len(response.Blocks["foo"]) != 1 {
			t.Error("Decoding produced", len(response.Blocks["foo"]), "partitions for 'foo' where there was one")
		}
		if len(response.Blocks["bar"]) != 2 {
			t.Error("Decoding produced", len(response.Blocks["bar"]), "partitions for 'bar' where there were two")
		}
		block := response.GetBlock("bar", 1)
		block := response.GetBlock("foo", 1)
		if block == nil {
			t.Error("Decoding did not produce a block for bar/1")
		} else {
			if block.Err != ErrNoError {
				t.Error("Decoding failed for bar/1/Err, got:", int16(block.Err))
			}
			if block.Offset != 0xFF {
				t.Error("Decoding failed for bar/1/Offset, got:", block.Offset)
			}
		}
		block = response.GetBlock("bar", 2)
		if block == nil {
			t.Error("Decoding did not produce a block for bar/2")
			t.Error("Decoding did not produce a block for foo/1")
		} else {
			if block.Err != ErrInvalidMessage {
				t.Error("Decoding failed for bar/2/Err, got:", int16(block.Err))
				t.Error("Decoding failed for foo/2/Err, got:", int16(block.Err))
			}
			if block.Offset != 0 {
				t.Error("Decoding failed for bar/2/Offset, got:", block.Offset)
			if block.Offset != 255 {
				t.Error("Decoding failed for foo/1/Offset, got:", block.Offset)
			}
			if v >= 2 {
				if block.Timestamp != time.Unix(1, 0) {
					t.Error("Decoding failed for foo/2/Timestamp, got:", block.Timestamp)
				}
			}
		}
		if v >= 1 {
			if expected := 100 * time.Millisecond; response.ThrottleTime != expected {
				t.Error("Failed decoding produced throttle time, expected:", expected, ", got:", response.ThrottleTime)
			}
		}
	}
}

func TestProduceResponseEncode(t *testing.T) {
	response := ProduceResponse{}
	response.Blocks = make(map[string]map[int32]*ProduceResponseBlock)
	testEncodable(t, "empty", &response, produceResponseNoBlocksV0)

	response.Blocks["foo"] = make(map[int32]*ProduceResponseBlock)
	response.Blocks["foo"][1] = &ProduceResponseBlock{
		Err:       ErrInvalidMessage,
		Offset:    255,
		Timestamp: time.Unix(1, 0),
	}
	response.ThrottleTime = 100 * time.Millisecond
	for v, produceResponseManyBlocks := range produceResponseManyBlocksVersions {
		response.Version = int16(v)
		testEncodable(t, fmt.Sprintf("many blocks version %d", v), &response, produceResponseManyBlocks)
	}
}

func TestProduceResponseEncodeInvalidTimestamp(t *testing.T) {
	response := ProduceResponse{}
	response.Version = 2
	response.Blocks = make(map[string]map[int32]*ProduceResponseBlock)
	response.Blocks["t"] = make(map[int32]*ProduceResponseBlock)
	response.Blocks["t"][0] = &ProduceResponseBlock{
		Err:    ErrNoError,
		Offset: 0,
		// Use a timestamp before Unix time
		Timestamp: time.Unix(0, 0).Add(-1 * time.Millisecond),
	}
	response.ThrottleTime = 100 * time.Millisecond
	_, err := encode(&response, nil)
	if err == nil {
		t.Error("Expecting error, got nil")
	}
	if _, ok := err.(PacketEncodingError); !ok {
		t.Error("Expecting PacketEncodingError, got:", err)
	}
}
@@ -1,10 +1,13 @@
package sarama

import "time"
import (
	"encoding/binary"
	"time"
)

type partitionSet struct {
	msgs          []*ProducerMessage
	setToSend *MessageSet
	recordsToSend Records
	bufferBytes   int
}

@@ -39,31 +42,64 @@ func (ps *produceSet) add(msg *ProducerMessage) error {
		}
	}

	timestamp := msg.Timestamp
	if msg.Timestamp.IsZero() {
		timestamp = time.Now()
	}

	partitions := ps.msgs[msg.Topic]
	if partitions == nil {
		partitions = make(map[int32]*partitionSet)
		ps.msgs[msg.Topic] = partitions
	}

	var size int

	set := partitions[msg.Partition]
	if set == nil {
		set = &partitionSet{setToSend: new(MessageSet)}
		if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
			batch := &RecordBatch{
				FirstTimestamp: timestamp,
				Version:        2,
				ProducerID:     -1, /* No producer id */
				Codec:          ps.parent.conf.Producer.Compression,
			}
			set = &partitionSet{recordsToSend: newDefaultRecords(batch)}
			size = recordBatchOverhead
		} else {
			set = &partitionSet{recordsToSend: newLegacyRecords(new(MessageSet))}
		}
		partitions[msg.Partition] = set
	}

	set.msgs = append(set.msgs, msg)
	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
		// We are being conservative here to avoid having to prep encode the record
		size += maximumRecordOverhead
		rec := &Record{
			Key:            key,
			Value:          val,
			TimestampDelta: timestamp.Sub(set.recordsToSend.recordBatch.FirstTimestamp),
		}
		size += len(key) + len(val)
		if len(msg.Headers) > 0 {
			rec.Headers = make([]*RecordHeader, len(msg.Headers))
			for i := range msg.Headers {
				rec.Headers[i] = &msg.Headers[i]
				size += len(rec.Headers[i].Key) + len(rec.Headers[i].Value) + 2*binary.MaxVarintLen32
			}
		}
		set.recordsToSend.recordBatch.addRecord(rec)
	} else {
		msgToSend := &Message{Codec: CompressionNone, Key: key, Value: val}
		if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
			if msg.Timestamp.IsZero() {
				msgToSend.Timestamp = time.Now()
			} else {
				msgToSend.Timestamp = msg.Timestamp
			}
			msgToSend.Timestamp = timestamp
			msgToSend.Version = 1
		}
		set.setToSend.addMessage(msgToSend)
		set.recordsToSend.msgSet.addMessage(msgToSend)
		size = producerMessageOverhead + len(key) + len(val)
	}

	size := producerMessageOverhead + len(key) + len(val)
	set.bufferBytes += size
	ps.bufferBytes += size
	ps.bufferCount++
@@ -79,17 +115,24 @@ func (ps *produceSet) buildRequest() *ProduceRequest {
	if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
		req.Version = 2
	}
	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
		req.Version = 3
	}

	for topic, partitionSet := range ps.msgs {
		for partition, set := range partitionSet {
			if req.Version >= 3 {
				req.AddBatch(topic, partition, set.recordsToSend.recordBatch)
				continue
			}
			if ps.parent.conf.Producer.Compression == CompressionNone {
				req.AddSet(topic, partition, set.setToSend)
				req.AddSet(topic, partition, set.recordsToSend.msgSet)
			} else {
				// When compression is enabled, the entire set for each partition is compressed
				// and sent as the payload of a single fake "message" with the appropriate codec
				// set and no key. When the server sees a message with a compression codec, it
				// decompresses the payload and treats the result as its message set.
				payload, err := encode(set.setToSend, ps.parent.conf.MetricRegistry)
				payload, err := encode(set.recordsToSend.msgSet, ps.parent.conf.MetricRegistry)
				if err != nil {
					Logger.Println(err) // if this happens, it's basically our fault.
					panic(err)
@@ -98,11 +141,11 @@ func (ps *produceSet) buildRequest() *ProduceRequest {
					Codec: ps.parent.conf.Producer.Compression,
					Key:   nil,
					Value: payload,
					Set:   set.setToSend, // Provide the underlying message set for accurate metrics
					Set:   set.recordsToSend.msgSet, // Provide the underlying message set for accurate metrics
				}
				if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
					compMsg.Version = 1
					compMsg.Timestamp = set.setToSend.Messages[0].Msg.Timestamp
					compMsg.Timestamp = set.recordsToSend.msgSet.Messages[0].Msg.Timestamp
				}
				req.AddMessage(topic, partition, compMsg)
			}
@@ -135,14 +178,19 @@ func (ps *produceSet) dropPartition(topic string, partition int32) []*ProducerMe
	}

func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool {
	version := 1
	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
		version = 2
	}

	switch {
	// Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety.
	case ps.bufferBytes+msg.byteSize() >= int(MaxRequestSize-(10*1024)):
	case ps.bufferBytes+msg.byteSize(version) >= int(MaxRequestSize-(10*1024)):
		return true
	// Would we overflow the size-limit of a compressed message-batch for this partition?
	case ps.parent.conf.Producer.Compression != CompressionNone &&
		ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil &&
		ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize() >= ps.parent.conf.Producer.MaxMessageBytes:
		ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize(version) >= ps.parent.conf.Producer.MaxMessageBytes:
		return true
	// Would we overflow simply in number of messages?
	case ps.parent.conf.Producer.Flush.MaxMessages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.MaxMessages:
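From the application side, the record-batch path is opt-in via the configured protocol version: once it is at least V0_11_0_0, produceSet.add builds RecordBatch entries and message headers are carried through to the broker. A hedged end-to-end sketch (broker address and topic are placeholders):

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()
	config.Version = sarama.V0_11_0_0 // enables the v3 produce path
	config.Producer.Compression = sarama.CompressionSnappy
	config.Producer.Return.Successes = true

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	// Headers are only transmitted on the record-batch (>= 0.11) path.
	_, _, err = producer.SendMessage(&sarama.ProducerMessage{
		Topic: "my_topic",
		Value: sarama.StringEncoder("payload"),
		Headers: []sarama.RecordHeader{
			{Key: []byte("trace-id"), Value: []byte("abc123")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```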
@@ -1,6 +1,7 @@
package sarama

import (
	"fmt"
	"testing"
	"time"
)
@@ -137,7 +138,7 @@ func TestProduceSetRequestBuilding(t *testing.T) {
		t.Error("Timeout not set properly")
	}

	if len(req.msgSets) != 2 {
	if len(req.records) != 2 {
		t.Error("Wrong number of topics in request")
	}
}
@@ -166,7 +167,7 @@ func TestProduceSetCompressedRequestBuilding(t *testing.T) {
		t.Error("Wrong request version")
	}

	for _, msgBlock := range req.msgSets["t1"][0].Messages {
	for _, msgBlock := range req.records["t1"][0].msgSet.Messages {
		msg := msgBlock.Msg
		err := msg.decodeSet()
		if err != nil {
@@ -183,3 +184,65 @@ func TestProduceSetCompressedRequestBuilding(t *testing.T) {
		}
	}
}

func TestProduceSetV3RequestBuilding(t *testing.T) {
	parent, ps := makeProduceSet()
	parent.conf.Producer.RequiredAcks = WaitForAll
	parent.conf.Producer.Timeout = 10 * time.Second
	parent.conf.Version = V0_11_0_0

	now := time.Now()
	msg := &ProducerMessage{
		Topic:     "t1",
		Partition: 0,
		Key:       StringEncoder(TestMessage),
		Value:     StringEncoder(TestMessage),
		Headers: []RecordHeader{
			RecordHeader{
				Key:   []byte("header-1"),
				Value: []byte("value-1"),
			},
			RecordHeader{
				Key:   []byte("header-2"),
				Value: []byte("value-2"),
			},
			RecordHeader{
				Key:   []byte("header-3"),
				Value: []byte("value-3"),
			},
		},
		Timestamp: now,
	}
	for i := 0; i < 10; i++ {
		safeAddMessage(t, ps, msg)
		msg.Timestamp = msg.Timestamp.Add(time.Second)
	}

	req := ps.buildRequest()

	if req.Version != 3 {
		t.Error("Wrong request version")
	}

	batch := req.records["t1"][0].recordBatch
	if batch.FirstTimestamp != now {
		t.Errorf("Wrong first timestamp: %v", batch.FirstTimestamp)
	}
	for i := 0; i < 10; i++ {
		rec := batch.Records[i]
		if rec.TimestampDelta != time.Duration(i)*time.Second {
			t.Errorf("Wrong timestamp delta: %v", rec.TimestampDelta)
		}

		for j, h := range batch.Records[i].Headers {
			exp := fmt.Sprintf("header-%d", j+1)
			if string(h.Key) != exp {
				t.Errorf("Wrong header key, expected %v, got %v", exp, h.Key)
			}
			exp = fmt.Sprintf("value-%d", j+1)
			if string(h.Value) != exp {
				t.Errorf("Wrong header value, expected %v, got %v", exp, h.Value)
			}
		}
	}
}
@@ -7,8 +7,11 @@ import (

var errInvalidArrayLength = PacketDecodingError{"invalid array length"}
var errInvalidByteSliceLength = PacketDecodingError{"invalid byteslice length"}
var errInvalidByteSliceLengthType = PacketDecodingError{"invalid byteslice length type"}
var errInvalidStringLength = PacketDecodingError{"invalid string length"}
var errInvalidSubsetSize = PacketDecodingError{"invalid subset size"}
var errVarintOverflow = PacketDecodingError{"varint overflow"}
var errInvalidBool = PacketDecodingError{"invalid bool"}

type realDecoder struct {
	raw []byte
@@ -58,12 +61,26 @@ func (rd *realDecoder) getInt64() (int64, error) {
	return tmp, nil
}

func (rd *realDecoder) getVarint() (int64, error) {
	tmp, n := binary.Varint(rd.raw[rd.off:])
	if n == 0 {
		rd.off = len(rd.raw)
		return -1, ErrInsufficientData
	}
	if n < 0 {
		rd.off -= n
		return -1, errVarintOverflow
	}
	rd.off += n
	return tmp, nil
}

func (rd *realDecoder) getArrayLength() (int, error) {
	if rd.remaining() < 4 {
		rd.off = len(rd.raw)
		return -1, ErrInsufficientData
	}
	tmp := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
	tmp := int(int32(binary.BigEndian.Uint32(rd.raw[rd.off:])))
	rd.off += 4
	if tmp > rd.remaining() {
		rd.off = len(rd.raw)
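Two subtle points in the decoder above: binary.Varint distinguishes "ran out of bytes" (n == 0) from "value overflows 64 bits" (n < 0), and the array length must round-trip through int32 so that the wire value 0xFFFFFFFF comes back as -1 rather than about four billion on 64-bit platforms. A standalone illustration:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// binary.Varint: n == 0 means truncated input, n < 0 means overflow.
	if _, n := binary.Varint([]byte{0x80}); n == 0 {
		fmt.Println("truncated varint: insufficient data")
	}

	raw := []byte{0xFF, 0xFF, 0xFF, 0xFF} // a null array on the wire
	unsigned := int(binary.BigEndian.Uint32(raw))
	signed := int(int32(binary.BigEndian.Uint32(raw)))
	fmt.Println(unsigned, signed) // 4294967295 -1 on 64-bit, hence the int32 cast
}
```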
@@ -74,60 +91,84 @@ func (rd *realDecoder) getArrayLength() (int, error) {
	return tmp, nil
}

func (rd *realDecoder) getBool() (bool, error) {
	b, err := rd.getInt8()
	if err != nil || b == 0 {
		return false, err
	}
	if b != 1 {
		return false, errInvalidBool
	}
	return true, nil
}

// collections

func (rd *realDecoder) getBytes() ([]byte, error) {
	tmp, err := rd.getInt32()

	if err != nil {
		return nil, err
	}
	if tmp == -1 {
		return nil, nil
	}

	n := int(tmp)
	return rd.getRawBytes(int(tmp))
}

func (rd *realDecoder) getVarintBytes() ([]byte, error) {
	tmp, err := rd.getVarint()
	if err != nil {
		return nil, err
	}
	if tmp == -1 {
		return nil, nil
	}

	return rd.getRawBytes(int(tmp))
}

func (rd *realDecoder) getStringLength() (int, error) {
	length, err := rd.getInt16()
	if err != nil {
		return 0, err
	}

	n := int(length)

	switch {
	case n < -1:
		return nil, errInvalidByteSliceLength
	case n == -1:
		return nil, nil
	case n == 0:
		return make([]byte, 0), nil
		return 0, errInvalidStringLength
	case n > rd.remaining():
		rd.off = len(rd.raw)
		return nil, ErrInsufficientData
		return 0, ErrInsufficientData
	}

	tmpStr := rd.raw[rd.off : rd.off+n]
	rd.off += n
	return tmpStr, nil
	return n, nil
}

func (rd *realDecoder) getString() (string, error) {
	tmp, err := rd.getInt16()

	if err != nil {
	n, err := rd.getStringLength()
	if err != nil || n == -1 {
		return "", err
	}

	n := int(tmp)

	switch {
	case n < -1:
		return "", errInvalidStringLength
	case n == -1:
		return "", nil
	case n == 0:
		return "", nil
	case n > rd.remaining():
		rd.off = len(rd.raw)
		return "", ErrInsufficientData
	}

	tmpStr := string(rd.raw[rd.off : rd.off+n])
	rd.off += n
	return tmpStr, nil
}

func (rd *realDecoder) getNullableString() (*string, error) {
	n, err := rd.getStringLength()
	if err != nil || n == -1 {
		return nil, err
	}

	tmpStr := string(rd.raw[rd.off : rd.off+n])
	rd.off += n
	return &tmpStr, err
}

func (rd *realDecoder) getInt32Array() ([]int32, error) {
	if rd.remaining() < 4 {
		rd.off = len(rd.raw)
@@ -204,11 +245,12 @@ func (rd *realDecoder) getStringArray() ([]string, error) {

	ret := make([]string, n)
	for i := range ret {
		if str, err := rd.getString(); err != nil {
		str, err := rd.getString()
		if err != nil {
			return nil, err
		} else {
			ret[i] = str
		}

		ret[i] = str
	}
	return ret, nil
}
@@ -220,8 +262,16 @@ func (rd *realDecoder) remaining() int {
}

func (rd *realDecoder) getSubset(length int) (packetDecoder, error) {
	buf, err := rd.getRawBytes(length)
	if err != nil {
		return nil, err
	}
	return &realDecoder{raw: buf}, nil
}

func (rd *realDecoder) getRawBytes(length int) ([]byte, error) {
	if length < 0 {
		return nil, errInvalidSubsetSize
		return nil, errInvalidByteSliceLength
	} else if length > rd.remaining() {
		rd.off = len(rd.raw)
		return nil, ErrInsufficientData
@@ -229,7 +279,15 @@ func (rd *realDecoder) getSubset(length int) (packetDecoder, error) {

	start := rd.off
	rd.off += length
	return &realDecoder{raw: rd.raw[start:rd.off]}, nil
	return rd.raw[start:rd.off], nil
}

func (rd *realDecoder) peek(offset, length int) (packetDecoder, error) {
	if rd.remaining() < offset+length {
		return nil, ErrInsufficientData
	}
	off := rd.off + offset
	return &realDecoder{raw: rd.raw[off : off+length]}, nil
}

// stacks

@@ -237,11 +295,18 @@ func (rd *realDecoder) getSubset(length int) (packetDecoder, error) {
func (rd *realDecoder) push(in pushDecoder) error {
	in.saveOffset(rd.off)

	reserve := in.reserveLength()
	var reserve int
	if dpd, ok := in.(dynamicPushDecoder); ok {
		if err := dpd.decode(rd); err != nil {
			return err
		}
	} else {
		reserve = in.reserveLength()
		if rd.remaining() < reserve {
			rd.off = len(rd.raw)
			return ErrInsufficientData
		}
	}

	rd.stack = append(rd.stack, in)
@@ -35,11 +35,23 @@ func (re *realEncoder) putInt64(in int64) {
	re.off += 8
}

func (re *realEncoder) putVarint(in int64) {
	re.off += binary.PutVarint(re.raw[re.off:], in)
}

func (re *realEncoder) putArrayLength(in int) error {
	re.putInt32(int32(in))
	return nil
}

func (re *realEncoder) putBool(in bool) {
	if in {
		re.putInt8(1)
		return
	}
	re.putInt8(0)
}

// collection

func (re *realEncoder) putRawBytes(in []byte) error {
@@ -54,10 +66,17 @@ func (re *realEncoder) putBytes(in []byte) error {
		return nil
	}
	re.putInt32(int32(len(in)))
	copy(re.raw[re.off:], in)
	re.off += len(in)
	return re.putRawBytes(in)
}

func (re *realEncoder) putVarintBytes(in []byte) error {
	if in == nil {
		re.putVarint(-1)
		return nil
	}
	re.putVarint(int64(len(in)))
	return re.putRawBytes(in)
}

func (re *realEncoder) putString(in string) error {
	re.putInt16(int16(len(in)))
@@ -66,6 +85,14 @@ func (re *realEncoder) putString(in string) error {
	return nil
}

func (re *realEncoder) putNullableString(in *string) error {
	if in == nil {
		re.putInt16(-1)
		return nil
	}
	return re.putString(*in)
}

func (re *realEncoder) putStringArray(in []string) error {
	err := re.putArrayLength(len(in))
	if err != nil {
113 vendor/src/gopkg.in/Shopify/sarama.v1/record.go vendored Normal file
@@ -0,0 +1,113 @@
package sarama

import (
	"encoding/binary"
	"time"
)

const (
	controlMask           = 0x20
	maximumRecordOverhead = 5*binary.MaxVarintLen32 + binary.MaxVarintLen64 + 1
)

type RecordHeader struct {
	Key   []byte
	Value []byte
}

func (h *RecordHeader) encode(pe packetEncoder) error {
	if err := pe.putVarintBytes(h.Key); err != nil {
		return err
	}
	return pe.putVarintBytes(h.Value)
}

func (h *RecordHeader) decode(pd packetDecoder) (err error) {
	if h.Key, err = pd.getVarintBytes(); err != nil {
		return err
	}

	if h.Value, err = pd.getVarintBytes(); err != nil {
		return err
	}
	return nil
}

type Record struct {
	Attributes     int8
	TimestampDelta time.Duration
	OffsetDelta    int64
	Key            []byte
	Value          []byte
	Headers        []*RecordHeader

	length varintLengthField
}

func (r *Record) encode(pe packetEncoder) error {
	pe.push(&r.length)
	pe.putInt8(r.Attributes)
	pe.putVarint(int64(r.TimestampDelta / time.Millisecond))
	pe.putVarint(r.OffsetDelta)
	if err := pe.putVarintBytes(r.Key); err != nil {
		return err
	}
	if err := pe.putVarintBytes(r.Value); err != nil {
		return err
	}
	pe.putVarint(int64(len(r.Headers)))

	for _, h := range r.Headers {
		if err := h.encode(pe); err != nil {
			return err
		}
	}

	return pe.pop()
}

func (r *Record) decode(pd packetDecoder) (err error) {
	if err = pd.push(&r.length); err != nil {
		return err
	}

	if r.Attributes, err = pd.getInt8(); err != nil {
		return err
	}

	timestamp, err := pd.getVarint()
	if err != nil {
		return err
	}
	r.TimestampDelta = time.Duration(timestamp) * time.Millisecond

	if r.OffsetDelta, err = pd.getVarint(); err != nil {
		return err
	}

	if r.Key, err = pd.getVarintBytes(); err != nil {
		return err
	}

	if r.Value, err = pd.getVarintBytes(); err != nil {
		return err
	}

	numHeaders, err := pd.getVarint()
	if err != nil {
		return err
	}

	if numHeaders >= 0 {
		r.Headers = make([]*RecordHeader, numHeaders)
	}
	for i := int64(0); i < numHeaders; i++ {
		hdr := new(RecordHeader)
		if err := hdr.decode(pd); err != nil {
			return err
		}
		r.Headers[i] = hdr
	}

	return pd.pop()
}
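Record fields are varint-framed with zigzag encoding, so -1 (the null byteslice marker written by putVarintBytes) costs a single 0x01 byte, and a 3-byte header key is prefixed with 0x06, which matches the encoded fixtures in the tests. A quick standalone check:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	buf := make([]byte, binary.MaxVarintLen64)

	n := binary.PutVarint(buf, -1)      // null marker
	fmt.Printf("-1 -> %x\n", buf[:n])   // 01

	n = binary.PutVarint(buf, 3)        // length of a 3-byte key
	fmt.Printf(" 3 -> %x\n", buf[:n])   // 06
}
```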
265
vendor/src/gopkg.in/Shopify/sarama.v1/record_batch.go
vendored
Normal file
265
vendor/src/gopkg.in/Shopify/sarama.v1/record_batch.go
vendored
Normal file
|
@ -0,0 +1,265 @@
|
|||
package sarama
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"time"
|
||||
|
||||
"github.com/eapache/go-xerial-snappy"
|
||||
"github.com/pierrec/lz4"
|
||||
)
|
||||
|
||||
const recordBatchOverhead = 49
|
||||
|
||||
type recordsArray []*Record
|
||||
|
||||
func (e recordsArray) encode(pe packetEncoder) error {
|
||||
for _, r := range e {
|
||||
if err := r.encode(pe); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e recordsArray) decode(pd packetDecoder) error {
|
||||
for i := range e {
|
||||
rec := &Record{}
|
||||
if err := rec.decode(pd); err != nil {
|
||||
return err
|
||||
}
|
||||
e[i] = rec
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type RecordBatch struct {
|
||||
FirstOffset int64
|
||||
PartitionLeaderEpoch int32
|
||||
Version int8
|
||||
Codec CompressionCodec
|
||||
Control bool
|
||||
LastOffsetDelta int32
|
||||
FirstTimestamp time.Time
|
||||
MaxTimestamp time.Time
|
||||
ProducerID int64
|
||||
ProducerEpoch int16
|
||||
FirstSequence int32
|
||||
Records []*Record
|
||||
PartialTrailingRecord bool
|
||||
|
||||
compressedRecords []byte
|
||||
recordsLen int // uncompressed records size
|
||||
}
|
||||
|
||||
func (b *RecordBatch) encode(pe packetEncoder) error {
|
||||
if b.Version != 2 {
|
||||
return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", b.Codec)}
|
||||
}
|
||||
pe.putInt64(b.FirstOffset)
|
||||
pe.push(&lengthField{})
|
||||
pe.putInt32(b.PartitionLeaderEpoch)
|
||||
pe.putInt8(b.Version)
|
||||
pe.push(newCRC32Field(crcCastagnoli))
|
||||
pe.putInt16(b.computeAttributes())
|
||||
pe.putInt32(b.LastOffsetDelta)
|
||||
|
||||
if err := (Timestamp{&b.FirstTimestamp}).encode(pe); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := (Timestamp{&b.MaxTimestamp}).encode(pe); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pe.putInt64(b.ProducerID)
|
||||
pe.putInt16(b.ProducerEpoch)
|
||||
pe.putInt32(b.FirstSequence)
|
||||
|
||||
if err := pe.putArrayLength(len(b.Records)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if b.compressedRecords == nil {
|
||||
if err := b.encodeRecords(pe); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := pe.putRawBytes(b.compressedRecords); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := pe.pop(); err != nil {
|
||||
return err
|
||||
}
|
||||
return pe.pop()
|
||||
}
|
||||
|
||||
func (b *RecordBatch) decode(pd packetDecoder) (err error) {
|
||||
if b.FirstOffset, err = pd.getInt64(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
batchLen, err := pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if b.PartitionLeaderEpoch, err = pd.getInt32(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if b.Version, err = pd.getInt8(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = pd.push(&crc32Field{polynomial: crcCastagnoli}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
attributes, err := pd.getInt16()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b.Codec = CompressionCodec(int8(attributes) & compressionCodecMask)
|
||||
b.Control = attributes&controlMask == controlMask
|
||||
|
||||
if b.LastOffsetDelta, err = pd.getInt32(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = (Timestamp{&b.FirstTimestamp}).decode(pd); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = (Timestamp{&b.MaxTimestamp}).decode(pd); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if b.ProducerID, err = pd.getInt64(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if b.ProducerEpoch, err = pd.getInt16(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if b.FirstSequence, err = pd.getInt32(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
numRecs, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if numRecs >= 0 {
|
||||
b.Records = make([]*Record, numRecs)
|
||||
}
|
||||
|
||||
bufSize := int(batchLen) - recordBatchOverhead
|
||||
recBuffer, err := pd.getRawBytes(bufSize)
|
||||
if err != nil {
|
||||
if err == ErrInsufficientData {
|
||||
b.PartialTrailingRecord = true
|
||||
b.Records = nil
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
if err = pd.pop(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch b.Codec {
|
||||
case CompressionNone:
|
||||
case CompressionGZIP:
|
||||
reader, err := gzip.NewReader(bytes.NewReader(recBuffer))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if recBuffer, err = ioutil.ReadAll(reader); err != nil {
|
||||
return err
|
||||
}
|
||||
case CompressionSnappy:
|
||||
if recBuffer, err = snappy.Decode(recBuffer); err != nil {
|
||||
return err
|
||||
}
|
||||
case CompressionLZ4:
|
||||
reader := lz4.NewReader(bytes.NewReader(recBuffer))
|
||||
if recBuffer, err = ioutil.ReadAll(reader); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
return PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", b.Codec)}
|
||||
}
|
||||
|
||||
b.recordsLen = len(recBuffer)
|
||||
err = decode(recBuffer, recordsArray(b.Records))
|
||||
if err == ErrInsufficientData {
|
||||
b.PartialTrailingRecord = true
|
||||
b.Records = nil
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
func (b *RecordBatch) encodeRecords(pe packetEncoder) error {
	var raw []byte
	if b.Codec != CompressionNone {
		var err error
		if raw, err = encode(recordsArray(b.Records), nil); err != nil {
			return err
		}
		b.recordsLen = len(raw)
	}

	switch b.Codec {
	case CompressionNone:
		offset := pe.offset()
		if err := recordsArray(b.Records).encode(pe); err != nil {
			return err
		}
		b.recordsLen = pe.offset() - offset
	case CompressionGZIP:
		var buf bytes.Buffer
		writer := gzip.NewWriter(&buf)
		if _, err := writer.Write(raw); err != nil {
			return err
		}
		if err := writer.Close(); err != nil {
			return err
		}
		b.compressedRecords = buf.Bytes()
	case CompressionSnappy:
		b.compressedRecords = snappy.Encode(raw)
	case CompressionLZ4:
		var buf bytes.Buffer
		writer := lz4.NewWriter(&buf)
		if _, err := writer.Write(raw); err != nil {
			return err
		}
		if err := writer.Close(); err != nil {
			return err
		}
		b.compressedRecords = buf.Bytes()
	default:
		return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", b.Codec)}
	}

	return nil
}

func (b *RecordBatch) computeAttributes() int16 {
	attr := int16(b.Codec) & int16(compressionCodecMask)
	if b.Control {
		attr |= controlMask
	}
	return attr
}

func (b *RecordBatch) addRecord(r *Record) {
	b.Records = append(b.Records, r)
}
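For reference, computeAttributes and the decode path pack the compression codec into the low bits of the attributes field and the control flag into a higher bit. A minimal standalone sketch of that round trip, with the mask values inlined as assumptions rather than imported from the package:

package main

import "fmt"

const (
	compressionCodecMask int8  = 0x07 // low bits hold the codec (assumed value)
	controlMask          int16 = 0x20 // control-batch flag bit (assumed value)
)

func main() {
	// Encode: codec 2 (snappy) in a control batch.
	attr := int16(2) & int16(compressionCodecMask)
	attr |= controlMask

	// Decode: recover both fields from the packed attributes.
	codec := int8(attr) & compressionCodecMask
	isControl := attr&controlMask == controlMask
	fmt.Println(codec, isControl) // 2 true
}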
284 vendor/src/gopkg.in/Shopify/sarama.v1/record_test.go vendored Normal file

@@ -0,0 +1,284 @@
package sarama

import (
	"reflect"
	"runtime"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/davecgh/go-spew/spew"
)

var recordBatchTestCases = []struct {
	name         string
	batch        RecordBatch
	encoded      []byte
	oldGoEncoded []byte // used in case of gzipped content for go versions prior to 1.8
}{
	{
		name: "empty record",
		batch: RecordBatch{
			Version:        2,
			FirstTimestamp: time.Unix(0, 0),
			MaxTimestamp:   time.Unix(0, 0),
			Records:        []*Record{},
		},
		encoded: []byte{
			0, 0, 0, 0, 0, 0, 0, 0, // First Offset
			0, 0, 0, 49, // Length
			0, 0, 0, 0, // Partition Leader Epoch
			2,                // Version
			89, 95, 183, 221, // CRC
			0, 0, // Attributes
			0, 0, 0, 0, // Last Offset Delta
			0, 0, 0, 0, 0, 0, 0, 0, // First Timestamp
			0, 0, 0, 0, 0, 0, 0, 0, // Max Timestamp
			0, 0, 0, 0, 0, 0, 0, 0, // Producer ID
			0, 0, // Producer Epoch
			0, 0, 0, 0, // First Sequence
			0, 0, 0, 0, // Number of Records
		},
	},
	{
		name: "control batch",
		batch: RecordBatch{
			Version:        2,
			Control:        true,
			FirstTimestamp: time.Unix(0, 0),
			MaxTimestamp:   time.Unix(0, 0),
			Records:        []*Record{},
		},
		encoded: []byte{
			0, 0, 0, 0, 0, 0, 0, 0, // First Offset
			0, 0, 0, 49, // Length
			0, 0, 0, 0, // Partition Leader Epoch
			2,               // Version
			81, 46, 67, 217, // CRC
			0, 32, // Attributes
			0, 0, 0, 0, // Last Offset Delta
			0, 0, 0, 0, 0, 0, 0, 0, // First Timestamp
			0, 0, 0, 0, 0, 0, 0, 0, // Max Timestamp
			0, 0, 0, 0, 0, 0, 0, 0, // Producer ID
			0, 0, // Producer Epoch
			0, 0, 0, 0, // First Sequence
			0, 0, 0, 0, // Number of Records
		},
	},
	{
		name: "uncompressed record",
		batch: RecordBatch{
			Version:        2,
			FirstTimestamp: time.Unix(1479847795, 0),
			MaxTimestamp:   time.Unix(0, 0),
			Records: []*Record{{
				TimestampDelta: 5 * time.Millisecond,
				Key:            []byte{1, 2, 3, 4},
				Value:          []byte{5, 6, 7},
				Headers: []*RecordHeader{{
					Key:   []byte{8, 9, 10},
					Value: []byte{11, 12},
				}},
			}},
			recordsLen: 21,
		},
		encoded: []byte{
			0, 0, 0, 0, 0, 0, 0, 0, // First Offset
			0, 0, 0, 70, // Length
			0, 0, 0, 0, // Partition Leader Epoch
			2,                // Version
			84, 121, 97, 253, // CRC
			0, 0, // Attributes
			0, 0, 0, 0, // Last Offset Delta
			0, 0, 1, 88, 141, 205, 89, 56, // First Timestamp
			0, 0, 0, 0, 0, 0, 0, 0, // Max Timestamp
			0, 0, 0, 0, 0, 0, 0, 0, // Producer ID
			0, 0, // Producer Epoch
			0, 0, 0, 0, // First Sequence
			0, 0, 0, 1, // Number of Records
			40, // Record Length
			0,  // Attributes
			10, // Timestamp Delta
			0,  // Offset Delta
			8,  // Key Length
			1, 2, 3, 4,
			6, // Value Length
			5, 6, 7,
			2, // Number of Headers
			6, // Header Key Length
			8, 9, 10, // Header Key
			4, // Header Value Length
			11, 12, // Header Value
		},
	},
	{
		name: "gzipped record",
		batch: RecordBatch{
			Version:        2,
			Codec:          CompressionGZIP,
			FirstTimestamp: time.Unix(1479847795, 0),
			MaxTimestamp:   time.Unix(0, 0),
			Records: []*Record{{
				TimestampDelta: 5 * time.Millisecond,
				Key:            []byte{1, 2, 3, 4},
				Value:          []byte{5, 6, 7},
				Headers: []*RecordHeader{{
					Key:   []byte{8, 9, 10},
					Value: []byte{11, 12},
				}},
			}},
			recordsLen: 21,
		},
		encoded: []byte{
			0, 0, 0, 0, 0, 0, 0, 0, // First Offset
			0, 0, 0, 94, // Length
			0, 0, 0, 0, // Partition Leader Epoch
			2,                  // Version
			159, 236, 182, 189, // CRC
			0, 1, // Attributes
			0, 0, 0, 0, // Last Offset Delta
			0, 0, 1, 88, 141, 205, 89, 56, // First Timestamp
			0, 0, 0, 0, 0, 0, 0, 0, // Max Timestamp
			0, 0, 0, 0, 0, 0, 0, 0, // Producer ID
			0, 0, // Producer Epoch
			0, 0, 0, 0, // First Sequence
			0, 0, 0, 1, // Number of Records
			31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 210, 96, 224, 98, 224, 96, 100, 98, 102, 97, 99, 101,
			99, 103, 98, 227, 224, 228, 98, 225, 230, 1, 4, 0, 0, 255, 255, 173, 201, 88, 103, 21, 0, 0, 0,
		},
		oldGoEncoded: []byte{
			0, 0, 0, 0, 0, 0, 0, 0, // First Offset
			0, 0, 0, 94, // Length
			0, 0, 0, 0, // Partition Leader Epoch
			2,              // Version
			0, 216, 14, 210, // CRC
			0, 1, // Attributes
			0, 0, 0, 0, // Last Offset Delta
			0, 0, 1, 88, 141, 205, 89, 56, // First Timestamp
			0, 0, 0, 0, 0, 0, 0, 0, // Max Timestamp
			0, 0, 0, 0, 0, 0, 0, 0, // Producer ID
			0, 0, // Producer Epoch
			0, 0, 0, 0, // First Sequence
			0, 0, 0, 1, // Number of Records
			31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 210, 96, 224, 98, 224, 96, 100, 98, 102, 97, 99, 101,
			99, 103, 98, 227, 224, 228, 98, 225, 230, 1, 4, 0, 0, 255, 255, 173, 201, 88, 103, 21, 0, 0, 0,
		},
	},
	{
		name: "snappy compressed record",
		batch: RecordBatch{
			Version:        2,
			Codec:          CompressionSnappy,
			FirstTimestamp: time.Unix(1479847795, 0),
			MaxTimestamp:   time.Unix(0, 0),
			Records: []*Record{{
				TimestampDelta: 5 * time.Millisecond,
				Key:            []byte{1, 2, 3, 4},
				Value:          []byte{5, 6, 7},
				Headers: []*RecordHeader{{
					Key:   []byte{8, 9, 10},
					Value: []byte{11, 12},
				}},
			}},
			recordsLen: 21,
		},
		encoded: []byte{
			0, 0, 0, 0, 0, 0, 0, 0, // First Offset
			0, 0, 0, 72, // Length
			0, 0, 0, 0, // Partition Leader Epoch
			2,             // Version
			21, 0, 159, 97, // CRC
			0, 2, // Attributes
			0, 0, 0, 0, // Last Offset Delta
			0, 0, 1, 88, 141, 205, 89, 56, // First Timestamp
			0, 0, 0, 0, 0, 0, 0, 0, // Max Timestamp
			0, 0, 0, 0, 0, 0, 0, 0, // Producer ID
			0, 0, // Producer Epoch
			0, 0, 0, 0, // First Sequence
			0, 0, 0, 1, // Number of Records
			21, 80, 40, 0, 10, 0, 8, 1, 2, 3, 4, 6, 5, 6, 7, 2, 6, 8, 9, 10, 4, 11, 12,
		},
	},
	{
		name: "lz4 compressed record",
		batch: RecordBatch{
			Version:        2,
			Codec:          CompressionLZ4,
			FirstTimestamp: time.Unix(1479847795, 0),
			MaxTimestamp:   time.Unix(0, 0),
			Records: []*Record{{
				TimestampDelta: 5 * time.Millisecond,
				Key:            []byte{1, 2, 3, 4},
				Value:          []byte{5, 6, 7},
				Headers: []*RecordHeader{{
					Key:   []byte{8, 9, 10},
					Value: []byte{11, 12},
				}},
			}},
			recordsLen: 21,
		},
		encoded: []byte{
			0, 0, 0, 0, 0, 0, 0, 0, // First Offset
			0, 0, 0, 89, // Length
			0, 0, 0, 0, // Partition Leader Epoch
			2,                 // Version
			169, 74, 119, 197, // CRC
			0, 3, // Attributes
			0, 0, 0, 0, // Last Offset Delta
			0, 0, 1, 88, 141, 205, 89, 56, // First Timestamp
			0, 0, 0, 0, 0, 0, 0, 0, // Max Timestamp
			0, 0, 0, 0, 0, 0, 0, 0, // Producer ID
			0, 0, // Producer Epoch
			0, 0, 0, 0, // First Sequence
			0, 0, 0, 1, // Number of Records
			4, 34, 77, 24, 100, 112, 185, 21, 0, 0, 128, 40, 0, 10, 0, 8, 1, 2, 3, 4, 6, 5, 6, 7, 2,
			6, 8, 9, 10, 4, 11, 12, 0, 0, 0, 0, 12, 59, 239, 146,
		},
	},
}

func isOldGo(t *testing.T) bool {
	v := strings.Split(runtime.Version()[2:], ".")
	if len(v) < 2 {
		t.Logf("Can't parse version: %s", runtime.Version())
		return false
	}
	maj, err := strconv.Atoi(v[0])
	if err != nil {
		t.Logf("Can't parse version: %s", runtime.Version())
		return false
	}
	min, err := strconv.Atoi(v[1])
	if err != nil {
		t.Logf("Can't parse version: %s", runtime.Version())
		return false
	}
	return maj < 1 || (maj == 1 && min < 8)
}

func TestRecordBatchEncoding(t *testing.T) {
	for _, tc := range recordBatchTestCases {
		if tc.oldGoEncoded != nil && isOldGo(t) {
			testEncodable(t, tc.name, &tc.batch, tc.oldGoEncoded)
		} else {
			testEncodable(t, tc.name, &tc.batch, tc.encoded)
		}
	}
}

func TestRecordBatchDecoding(t *testing.T) {
	for _, tc := range recordBatchTestCases {
		batch := RecordBatch{}
		testDecodable(t, tc.name, &batch, tc.encoded)
		for _, r := range batch.Records {
			r.length = varintLengthField{}
		}
		for _, r := range tc.batch.Records {
			r.length = varintLengthField{}
		}
		if !reflect.DeepEqual(batch, tc.batch) {
			t.Errorf(spew.Sprintf("invalid decode of %s\ngot %+v\nwanted %+v", tc.name, batch, tc.batch))
		}
	}
}
167 vendor/src/gopkg.in/Shopify/sarama.v1/records.go vendored Normal file

@@ -0,0 +1,167 @@
package sarama

import "fmt"

const (
	unknownRecords = iota
	legacyRecords
	defaultRecords

	magicOffset = 16
	magicLength = 1
)

// Records implements a union type containing either a RecordBatch or a legacy MessageSet.
type Records struct {
	recordsType int
	msgSet      *MessageSet
	recordBatch *RecordBatch
}

func newLegacyRecords(msgSet *MessageSet) Records {
	return Records{recordsType: legacyRecords, msgSet: msgSet}
}

func newDefaultRecords(batch *RecordBatch) Records {
	return Records{recordsType: defaultRecords, recordBatch: batch}
}

// setTypeFromFields sets the type of Records depending on which of msgSet or recordBatch is not nil.
// The first return value indicates whether both fields are nil (and the type is not set).
// If both fields are non-nil, it returns an error.
func (r *Records) setTypeFromFields() (bool, error) {
	if r.msgSet == nil && r.recordBatch == nil {
		return true, nil
	}
	if r.msgSet != nil && r.recordBatch != nil {
		return false, fmt.Errorf("both msgSet and recordBatch are set, but record type is unknown")
	}
	r.recordsType = defaultRecords
	if r.msgSet != nil {
		r.recordsType = legacyRecords
	}
	return false, nil
}

func (r *Records) encode(pe packetEncoder) error {
	if r.recordsType == unknownRecords {
		if empty, err := r.setTypeFromFields(); err != nil || empty {
			return err
		}
	}

	switch r.recordsType {
	case legacyRecords:
		if r.msgSet == nil {
			return nil
		}
		return r.msgSet.encode(pe)
	case defaultRecords:
		if r.recordBatch == nil {
			return nil
		}
		return r.recordBatch.encode(pe)
	}
	return fmt.Errorf("unknown records type: %v", r.recordsType)
}

func (r *Records) setTypeFromMagic(pd packetDecoder) error {
	dec, err := pd.peek(magicOffset, magicLength)
	if err != nil {
		return err
	}

	magic, err := dec.getInt8()
	if err != nil {
		return err
	}

	r.recordsType = defaultRecords
	if magic < 2 {
		r.recordsType = legacyRecords
	}
	return nil
}

func (r *Records) decode(pd packetDecoder) error {
	if r.recordsType == unknownRecords {
		// propagate the error rather than swallowing it; returning nil here
		// would silently skip decoding when the magic byte cannot be read
		if err := r.setTypeFromMagic(pd); err != nil {
			return err
		}
	}

	switch r.recordsType {
	case legacyRecords:
		r.msgSet = &MessageSet{}
		return r.msgSet.decode(pd)
	case defaultRecords:
		r.recordBatch = &RecordBatch{}
		return r.recordBatch.decode(pd)
	}
	return fmt.Errorf("unknown records type: %v", r.recordsType)
}

func (r *Records) numRecords() (int, error) {
	if r.recordsType == unknownRecords {
		if empty, err := r.setTypeFromFields(); err != nil || empty {
			return 0, err
		}
	}

	switch r.recordsType {
	case legacyRecords:
		if r.msgSet == nil {
			return 0, nil
		}
		return len(r.msgSet.Messages), nil
	case defaultRecords:
		if r.recordBatch == nil {
			return 0, nil
		}
		return len(r.recordBatch.Records), nil
	}
	return 0, fmt.Errorf("unknown records type: %v", r.recordsType)
}

func (r *Records) isPartial() (bool, error) {
	if r.recordsType == unknownRecords {
		if empty, err := r.setTypeFromFields(); err != nil || empty {
			return false, err
		}
	}

	switch r.recordsType {
	case unknownRecords:
		return false, nil
	case legacyRecords:
		if r.msgSet == nil {
			return false, nil
		}
		return r.msgSet.PartialTrailingMessage, nil
	case defaultRecords:
		if r.recordBatch == nil {
			return false, nil
		}
		return r.recordBatch.PartialTrailingRecord, nil
	}
	return false, fmt.Errorf("unknown records type: %v", r.recordsType)
}

func (r *Records) isControl() (bool, error) {
	if r.recordsType == unknownRecords {
		if empty, err := r.setTypeFromFields(); err != nil || empty {
			return false, err
		}
	}

	switch r.recordsType {
	case legacyRecords:
		return false, nil
	case defaultRecords:
		if r.recordBatch == nil {
			return false, nil
		}
		return r.recordBatch.Control, nil
	}
	return false, fmt.Errorf("unknown records type: %v", r.recordsType)
}
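The magic byte at offset 16 of either wire format is what lets decode pick a branch before committing to a parser. A minimal standalone sketch of the same dispatch idea (buffer layout and constant inlined as assumptions, not taken from the package):

package main

import "fmt"

const magicOffset = 16 // position of the magic byte in both wire formats

// sketchRecordsType mimics the dispatch: magic >= 2 means the new
// RecordBatch format, anything older is a legacy MessageSet.
func sketchRecordsType(buf []byte) (string, error) {
	if len(buf) <= magicOffset {
		return "", fmt.Errorf("insufficient data to peek magic byte")
	}
	if int8(buf[magicOffset]) < 2 {
		return "legacy MessageSet", nil
	}
	return "default RecordBatch", nil
}

func main() {
	batch := make([]byte, 32)
	batch[magicOffset] = 2
	fmt.Println(sketchRecordsType(batch)) // default RecordBatch <nil>
}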
143 vendor/src/gopkg.in/Shopify/sarama.v1/records_test.go vendored Normal file

@@ -0,0 +1,143 @@
package sarama

import (
	"bytes"
	"reflect"
	"testing"
)

func TestLegacyRecords(t *testing.T) {
	set := &MessageSet{
		Messages: []*MessageBlock{
			{
				Msg: &Message{
					Version: 1,
				},
			},
		},
	}
	r := newLegacyRecords(set)

	exp, err := encode(set, nil)
	if err != nil {
		t.Fatal(err)
	}
	buf, err := encode(&r, nil)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(buf, exp) {
		t.Errorf("Wrong encoding for legacy records, wanted %v, got %v", exp, buf)
	}

	set = &MessageSet{}
	r = Records{}

	err = decode(exp, set)
	if err != nil {
		t.Fatal(err)
	}
	err = decode(buf, &r)
	if err != nil {
		t.Fatal(err)
	}

	if r.recordsType != legacyRecords {
		t.Fatalf("Wrong records type %v, expected %v", r.recordsType, legacyRecords)
	}
	if !reflect.DeepEqual(set, r.msgSet) {
		t.Errorf("Wrong decoding for legacy records, wanted %#+v, got %#+v", set, r.msgSet)
	}

	n, err := r.numRecords()
	if err != nil {
		t.Fatal(err)
	}
	if n != 1 {
		t.Errorf("Wrong number of records, wanted 1, got %d", n)
	}

	p, err := r.isPartial()
	if err != nil {
		t.Fatal(err)
	}
	if p {
		t.Errorf("MessageSet shouldn't have a partial trailing message")
	}

	c, err := r.isControl()
	if err != nil {
		t.Fatal(err)
	}
	if c {
		t.Errorf("MessageSet can't be a control batch")
	}
}

func TestDefaultRecords(t *testing.T) {
	batch := &RecordBatch{
		Version: 2,
		Records: []*Record{
			{
				Value: []byte{1},
			},
		},
	}

	r := newDefaultRecords(batch)

	exp, err := encode(batch, nil)
	if err != nil {
		t.Fatal(err)
	}
	buf, err := encode(&r, nil)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(buf, exp) {
		t.Errorf("Wrong encoding for default records, wanted %v, got %v", exp, buf)
	}

	batch = &RecordBatch{}
	r = Records{}

	err = decode(exp, batch)
	if err != nil {
		t.Fatal(err)
	}
	err = decode(buf, &r)
	if err != nil {
		t.Fatal(err)
	}

	if r.recordsType != defaultRecords {
		t.Fatalf("Wrong records type %v, expected %v", r.recordsType, defaultRecords)
	}
	if !reflect.DeepEqual(batch, r.recordBatch) {
		t.Errorf("Wrong decoding for default records, wanted %#+v, got %#+v", batch, r.recordBatch)
	}

	n, err := r.numRecords()
	if err != nil {
		t.Fatal(err)
	}
	if n != 1 {
		t.Errorf("Wrong number of records, wanted 1, got %d", n)
	}

	p, err := r.isPartial()
	if err != nil {
		t.Fatal(err)
	}
	if p {
		t.Errorf("RecordBatch shouldn't have a partial trailing record")
	}

	c, err := r.isControl()
	if err != nil {
		t.Fatal(err)
	}
	if c {
		t.Errorf("RecordBatch shouldn't be a control batch")
	}
}
@@ -114,6 +114,8 @@ func allocateBody(key, version int16) protocolBody {
 		return &SaslHandshakeRequest{}
 	case 18:
 		return &ApiVersionsRequest{}
+	case 37:
+		return &CreatePartitionsRequest{}
 	}
 	return nil
 }
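For context, allocateBody is the decode hook that turns an API key read off the wire into an empty, fillable request struct; a nil result means "unsupported request type". A standalone sketch of the same key-dispatch pattern, with simplified stand-in types that are not sarama's real interface:

package main

import "fmt"

// body is a stand-in for sarama's protocolBody interface.
type body interface{ key() int16 }

type apiVersionsRequest struct{}

func (apiVersionsRequest) key() int16 { return 18 }

type createPartitionsRequest struct{}

func (createPartitionsRequest) key() int16 { return 37 }

// allocate mirrors the switch above: known keys yield an empty struct
// for the decoder to fill, unknown keys yield nil.
func allocate(key int16) body {
	switch key {
	case 18:
		return apiVersionsRequest{}
	case 37:
		return createPartitionsRequest{}
	}
	return nil
}

func main() {
	fmt.Println(allocate(37)) // {}
	fmt.Println(allocate(99)) // <nil>
}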
@@ -50,7 +50,11 @@ func testVersionDecodable(t *testing.T, name string, out versionedDecoder, in []
 }

 func testRequest(t *testing.T, name string, rb protocolBody, expected []byte) {
-	// Encoder request
+	packet := testRequestEncode(t, name, rb, expected)
+	testRequestDecode(t, name, rb, packet)
+}
+
+func testRequestEncode(t *testing.T, name string, rb protocolBody, expected []byte) []byte {
 	req := &request{correlationID: 123, clientID: "foo", body: rb}
 	packet, err := encode(req, nil)
 	headerSize := 14 + len("foo")
@@ -59,7 +63,10 @@ func testRequest(t *testing.T, name string, rb protocolBody, expected []byte) {
 	} else if !bytes.Equal(packet[headerSize:], expected) {
 		t.Error("Encoding", name, "failed\ngot ", packet[headerSize:], "\nwant", expected)
 	}
-	// Decoder request
+	return packet
+}
+
+func testRequestDecode(t *testing.T, name string, rb protocolBody, packet []byte) {
 	decoded, n, err := decodeRequest(bytes.NewReader(packet))
 	if err != nil {
 		t.Error("Failed to decode request", err)
@@ -11,13 +11,13 @@ func (r *SaslHandshakeResponse) encode(pe packetEncoder) error {
 }

 func (r *SaslHandshakeResponse) decode(pd packetDecoder, version int16) error {
-	if kerr, err := pd.getInt16(); err != nil {
+	kerr, err := pd.getInt16()
+	if err != nil {
 		return err
-	} else {
-		r.Err = KError(kerr)
 	}

-	var err error
+	r.Err = KError(kerr)
+
 	if r.EnabledMechanisms, err = pd.getStringArray(); err != nil {
 		return err
 	}
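This rewrite (and the matching one in SyncGroupResponse below) sidesteps a Go scoping pitfall: variables declared in an if statement's init clause are scoped to that statement, so the old code needed an else branch plus a second `var err error` just to reach `kerr`. A standalone sketch of the pitfall, with hypothetical values:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// n is scoped to the if/else and invisible afterwards.
	if n, err := strconv.Atoi("42"); err != nil {
		fmt.Println("parse failed:", err)
	} else {
		fmt.Println("inside else:", n)
	}
	// fmt.Println(n) // would not compile: undefined: n

	// Declaring first keeps the value in scope for later use.
	n, err := strconv.Atoi("42")
	if err != nil {
		fmt.Println("parse failed:", err)
		return
	}
	fmt.Println("still in scope:", n)
}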
@@ -17,12 +17,13 @@ func (r *SyncGroupResponse) encode(pe packetEncoder) error {
 }

 func (r *SyncGroupResponse) decode(pd packetDecoder, version int16) (err error) {
-	if kerr, err := pd.getInt16(); err != nil {
+	kerr, err := pd.getInt16()
+	if err != nil {
 		return err
-	} else {
-		r.Err = KError(kerr)
 	}

+	r.Err = KError(kerr)
+
 	r.MemberAssignment, err = pd.getBytes()
 	return
 }
@@ -25,10 +25,10 @@ type SyncProducer interface {
 	// SendMessages will return an error.
 	SendMessages(msgs []*ProducerMessage) error

-	// Close shuts down the producer and flushes any messages it may have buffered.
-	// You must call this function before a producer object passes out of scope, as
-	// it may otherwise leak memory. You must call this before calling Close on the
-	// underlying client.
+	// Close shuts down the producer and waits for any buffered messages to be
+	// flushed. You must call this function before a producer object passes out of
+	// scope, as it may otherwise leak memory. You must call this before calling
+	// Close on the underlying client.
 	Close() error
 }
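The reworded comment pins down an ordering contract: the producer's Close must run before the underlying client's. A hedged usage sketch of that ordering (broker address and topic are placeholders; the import path matches the vendored copy here):

package main

import (
	"log"

	sarama "gopkg.in/Shopify/sarama.v1"
)

func main() {
	config := sarama.NewConfig()
	config.Producer.Return.Successes = true // required by SyncProducer

	client, err := sarama.NewClient([]string{"localhost:9092"}, config) // address assumed
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close() // deferred first, so it runs *after* producer.Close()

	producer, err := sarama.NewSyncProducerFromClient(client)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close() // flushes buffered messages before the client goes away

	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "my_topic",
		Value: sarama.StringEncoder("hello"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("stored at partition %d, offset %d", partition, offset)
}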
@@ -76,17 +76,17 @@ func TestSyncProducerBatch(t *testing.T) {
 	}

 	err = producer.SendMessages([]*ProducerMessage{
-		&ProducerMessage{
+		{
 			Topic:    "my_topic",
 			Value:    StringEncoder(TestMessage),
 			Metadata: "test",
 		},
-		&ProducerMessage{
+		{
 			Topic:    "my_topic",
 			Value:    StringEncoder(TestMessage),
 			Metadata: "test",
 		},
-		&ProducerMessage{
+		{
 			Topic:    "my_topic",
 			Value:    StringEncoder(TestMessage),
 			Metadata: "test",
40 vendor/src/gopkg.in/Shopify/sarama.v1/timestamp.go vendored Normal file

@@ -0,0 +1,40 @@
package sarama

import (
	"fmt"
	"time"
)

type Timestamp struct {
	*time.Time
}

func (t Timestamp) encode(pe packetEncoder) error {
	timestamp := int64(-1)

	if !t.Before(time.Unix(0, 0)) {
		timestamp = t.UnixNano() / int64(time.Millisecond)
	} else if !t.IsZero() {
		return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", t)}
	}

	pe.putInt64(timestamp)
	return nil
}

func (t Timestamp) decode(pd packetDecoder) error {
	millis, err := pd.getInt64()
	if err != nil {
		return err
	}

	// negative timestamps are invalid, in these cases we should return
	// a zero time
	timestamp := time.Time{}
	if millis >= 0 {
		timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
	}

	*t.Time = timestamp
	return nil
}
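The millisecond round trip in encode/decode is the part worth getting right: the wire carries milliseconds since the epoch (with -1 as a "no timestamp" sentinel), and decode splits that back into whole seconds plus a nanosecond remainder. A standalone sketch of the same conversion, with an arbitrary example instant and no sarama types involved:

package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Unix(1479847795, 5*int64(time.Millisecond)) // arbitrary example instant

	// Encode: wall time -> milliseconds since the epoch (sub-ms truncated).
	millis := t.UnixNano() / int64(time.Millisecond)

	// Decode: split back into whole seconds plus the millisecond remainder.
	back := time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))

	fmt.Println(millis)        // 1479847795005
	fmt.Println(t.Equal(back)) // true (equal at millisecond precision)
}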
40 vendor/src/gopkg.in/Shopify/sarama.v1/utils.go vendored

@@ -2,8 +2,9 @@ package sarama

 import (
 	"bufio"
+	"fmt"
 	"net"
+	"regexp"
 	"sort"
 )

 type none struct{}

@@ -23,13 +24,11 @@ func (slice int32Slice) Swap(i, j int) {
 	slice[i], slice[j] = slice[j], slice[i]
 }

-func dupeAndSort(input []int32) []int32 {
+func dupInt32Slice(input []int32) []int32 {
 	ret := make([]int32, 0, len(input))
 	for _, val := range input {
 		ret = append(ret, val)
 	}

 	sort.Sort(int32Slice(ret))
 	return ret
 }

@@ -148,5 +147,38 @@
 	V0_10_0_0 = newKafkaVersion(0, 10, 0, 0)
 	V0_10_0_1 = newKafkaVersion(0, 10, 0, 1)
 	V0_10_1_0 = newKafkaVersion(0, 10, 1, 0)
+	V0_10_2_0 = newKafkaVersion(0, 10, 2, 0)
+	V0_11_0_0 = newKafkaVersion(0, 11, 0, 0)
+	V1_0_0_0  = newKafkaVersion(1, 0, 0, 0)
 	minVersion = V0_8_2_0
 )
+
+func ParseKafkaVersion(s string) (KafkaVersion, error) {
+	var major, minor, veryMinor, patch uint
+	var err error
+	if s[0] == '0' {
+		err = scanKafkaVersion(s, `^0\.\d+\.\d+\.\d+$`, "0.%d.%d.%d", [3]*uint{&minor, &veryMinor, &patch})
+	} else {
+		err = scanKafkaVersion(s, `^\d+\.\d+\.\d+$`, "%d.%d.%d", [3]*uint{&major, &minor, &veryMinor})
+	}
+	if err != nil {
+		return minVersion, err
+	}
+	return newKafkaVersion(major, minor, veryMinor, patch), nil
+}
+
+func scanKafkaVersion(s string, pattern string, format string, v [3]*uint) error {
+	if !regexp.MustCompile(pattern).MatchString(s) {
+		return fmt.Errorf("invalid version `%s`", s)
+	}
+	_, err := fmt.Sscanf(s, format, v[0], v[1], v[2])
+	return err
+}
+
+func (v KafkaVersion) String() string {
+	if v.version[0] == 0 {
+		return fmt.Sprintf("0.%d.%d.%d", v.version[1], v.version[2], v.version[3])
+	} else {
+		return fmt.Sprintf("%d.%d.%d", v.version[0], v.version[1], v.version[2])
+	}
+}
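A quick usage sketch of the new version helpers, wiring a parsed version into a client config (standalone; the import path matches the vendored copy here, and the version string is an example):

package main

import (
	"fmt"
	"log"

	sarama "gopkg.in/Shopify/sarama.v1"
)

func main() {
	v, err := sarama.ParseKafkaVersion("0.11.0.0")
	if err != nil {
		log.Fatal(err)
	}

	config := sarama.NewConfig()
	config.Version = v // gate protocol features on the broker version

	fmt.Println(v.String()) // 0.11.0.0
}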
@@ -19,3 +19,23 @@ func TestVersionCompare(t *testing.T) {
 		t.Error("0.8.2.1 >= 0.10.0.0")
 	}
 }
+
+func TestVersionParsing(t *testing.T) {
+	validVersions := []string{"0.8.2.0", "0.8.2.1", "0.9.0.0", "0.10.2.0", "1.0.0"}
+	for _, s := range validVersions {
+		v, err := ParseKafkaVersion(s)
+		if err != nil {
+			t.Errorf("could not parse valid version %s: %s", s, err)
+		}
+		if v.String() != s {
+			t.Errorf("version %s != %s", v.String(), s)
+		}
+	}
+
+	invalidVersions := []string{"0.8.2-4", "0.8.20", "1.19.0.0", "1.0.x"}
+	for _, s := range invalidVersions {
+		if _, err := ParseKafkaVersion(s); err == nil {
+			t.Errorf("invalid version %s parsed without error", s)
+		}
+	}
+}
@@ -6,7 +6,7 @@ TOXIPROXY_VERSION=2.0.0

 mkdir -p ${KAFKA_INSTALL_ROOT}
 if [ ! -f ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_VERSION}.tgz ]; then
-    wget --quiet http://apache.mirror.gtcomm.net/kafka/${KAFKA_VERSION}/kafka_2.10-${KAFKA_VERSION}.tgz -O ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_VERSION}.tgz
+    wget --quiet http://apache.mirror.gtcomm.net/kafka/${KAFKA_VERSION}/kafka_2.11-${KAFKA_VERSION}.tgz -O ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_VERSION}.tgz
 fi
 if [ ! -f ${KAFKA_INSTALL_ROOT}/toxiproxy-${TOXIPROXY_VERSION} ]; then
     wget --quiet https://github.com/Shopify/toxiproxy/releases/download/v${TOXIPROXY_VERSION}/toxiproxy-server-linux-amd64 -O ${KAFKA_INSTALL_ROOT}/toxiproxy-${TOXIPROXY_VERSION}