diff --git a/go.mod b/go.mod index 83368df..d7a5a58 100644 --- a/go.mod +++ b/go.mod @@ -24,6 +24,8 @@ require ( github.com/spf13/cobra v1.6.1 github.com/spf13/viper v1.0.2 github.com/stretchr/testify v1.8.1 + github.com/twmb/franz-go v1.17.1 + github.com/twmb/franz-go/pkg/kmsg v1.8.0 github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c github.com/youmark/pkcs8 v0.0.0-20240424034433-3c2c7870ae76 golang.org/x/net v0.23.0 diff --git a/go.sum b/go.sum index ee932fa..4bca45b 100644 --- a/go.sum +++ b/go.sum @@ -252,6 +252,10 @@ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1F github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/twmb/franz-go v1.17.1 h1:0LwPsbbJeJ9R91DPUHSEd4su82WJWcTY1Zzbgbg4CeQ= +github.com/twmb/franz-go v1.17.1/go.mod h1:NreRdJ2F7dziDY/m6VyspWd6sNxHKXdMZI42UfQ3GXM= +github.com/twmb/franz-go/pkg/kmsg v1.8.0 h1:lAQB9Z3aMrIP9qF9288XcFf/ccaSxEitNA1CDTEIeTA= +github.com/twmb/franz-go/pkg/kmsg v1.8.0/go.mod h1:HzYEb8G3uu5XevZbtU0dVbkphaKTHk0X68N5ka4q6mU= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0= diff --git a/proxy/client.go b/proxy/client.go index e60cf02..42352be 100644 --- a/proxy/client.go +++ b/proxy/client.go @@ -305,7 +305,18 @@ func (c *Client) handleConn(conn Conn) { } c.conns.Add(conn.BrokerAddress, conn.LocalConnection) localDesc := "local connection on " + conn.LocalConnection.LocalAddr().String() + " from " + conn.LocalConnection.RemoteAddr().String() + " (" + conn.BrokerAddress + ")" - copyThenClose(c.processorConfig, server, conn.LocalConnection, conn.BrokerAddress, conn.BrokerAddress, localDesc) + + var id *string + + tlsConn, ok := conn.LocalConnection.(*tls.Conn) + if ok { + if len(tlsConn.ConnectionState().PeerCertificates) > 0 { + commonName := tlsConn.ConnectionState().PeerCertificates[0].Subject.CommonName + id = &commonName + } + } + + copyThenClose(c.processorConfig, server, conn.LocalConnection, conn.BrokerAddress, id, conn.BrokerAddress, localDesc) if err := c.conns.Remove(conn.BrokerAddress, conn.LocalConnection); err != nil { logrus.Info(err) } diff --git a/proxy/common.go b/proxy/common.go index af6d3cb..ad5dc19 100644 --- a/proxy/common.go +++ b/proxy/common.go @@ -4,11 +4,12 @@ import ( "bytes" "errors" "fmt" - "github.com/sirupsen/logrus" "io" "net" "sync" "time" + + "github.com/sirupsen/logrus" ) type DeadlineReadWriteCloser interface { @@ -111,9 +112,9 @@ func copyError(readDesc, writeDesc string, readErr bool, err error) { logrus.Infof("%v had error: %s", desc, err.Error()) } -func copyThenClose(cfg ProcessorConfig, remote, local DeadlineReadWriteCloser, brokerAddress string, remoteDesc, localDesc string) { +func copyThenClose(cfg ProcessorConfig, remote, local DeadlineReadWriteCloser, brokerAddress string, id *string, remoteDesc, localDesc string) { - processor := newProcessor(cfg, brokerAddress) + processor := newProcessor(cfg, brokerAddress, id) firstErr := make(chan error, 1) diff --git a/proxy/processor.go b/proxy/processor.go index 1f61bf7..becd278 100644 --- a/proxy/processor.go +++ b/proxy/processor.go @@ -2,9 +2,10 @@ package proxy 
import ( "errors" + "time" + "github.com/grepplabs/kafka-proxy/config" "github.com/grepplabs/kafka-proxy/proxy/protocol" - "time" ) const ( @@ -17,6 +18,7 @@ const ( minOpenRequests = 16 apiKeyProduce = int16(0) + apiKeyFetch = int16(1) apiKeySaslHandshake = int16(17) apiKeyApiApiVersions = int16(18) @@ -61,11 +63,14 @@ type processor struct { forbiddenApiKeys map[int16]struct{} // metrics brokerAddress string + + clientID *string + // producer will never send request with acks=0 producerAcks0Disabled bool } -func newProcessor(cfg ProcessorConfig, brokerAddress string) *processor { +func newProcessor(cfg ProcessorConfig, brokerAddress string, id *string) *processor { maxOpenRequests := cfg.MaxOpenRequests if maxOpenRequests < minOpenRequests { maxOpenRequests = minOpenRequests @@ -103,6 +108,7 @@ func newProcessor(cfg ProcessorConfig, brokerAddress string) *processor { readTimeout: readTimeout, writeTimeout: writeTimeout, brokerAddress: brokerAddress, + clientID: id, localSasl: cfg.LocalSasl, authServer: cfg.AuthServer, forbiddenApiKeys: cfg.ForbiddenApiKeys, @@ -125,6 +131,7 @@ func (p *processor) RequestsLoop(dst DeadlineWriter, src DeadlineReaderWriter) ( nextResponseHandlerChannel: p.nextResponseHandlerChannel, timeout: p.writeTimeout, brokerAddress: p.brokerAddress, + clientID: p.clientID, forbiddenApiKeys: p.forbiddenApiKeys, buf: make([]byte, p.requestBufferSize), localSasl: p.localSasl, @@ -142,6 +149,7 @@ type RequestsLoopContext struct { timeout time.Duration brokerAddress string + clientID *string forbiddenApiKeys map[int16]struct{} buf []byte // bufSize diff --git a/proxy/processor_default.go b/proxy/processor_default.go index 291073a..cef37c8 100644 --- a/proxy/processor_default.go +++ b/proxy/processor_default.go @@ -4,11 +4,14 @@ import ( "bytes" "errors" "fmt" - "github.com/grepplabs/kafka-proxy/proxy/protocol" - "github.com/sirupsen/logrus" "io" "strconv" "time" + + "github.com/grepplabs/kafka-proxy/proxy/protocol" + "github.com/sirupsen/logrus" + "github.com/twmb/franz-go/pkg/kbin" + "github.com/twmb/franz-go/pkg/kmsg" ) type DefaultRequestHandler struct { @@ -85,9 +88,82 @@ func (handler *DefaultRequestHandler) handleRequest(dst DeadlineWriter, src Dead } } - mustReply, readBytes, err := handler.mustReply(requestKeyVersion, src, ctx) - if err != nil { - return true, err + mustReply := true + var readBytes []byte + + switch requestKeyVersion.ApiKey { + case apiKeyProduce: + request := kmsg.NewPtrProduceRequest() + request.SetVersion(requestKeyVersion.ApiVersion) + + readBytes = make([]byte, requestKeyVersion.Length-4) + + _, err := io.ReadFull(src, readBytes) + if err != nil { + return false, + fmt.Errorf("could not read response: %w", err) + } + + reader := kbin.Reader{Src: readBytes} + reader.Uint32() // Correlation ID + reader.String() // client_id + + if request.IsFlexible() { + kmsg.ReadTags(&reader) + } + + err = request.ReadFrom(reader.Src) + if err != nil { + return false, + fmt.Errorf("could not read response: %w", err) + } + + mustReply = request.Acks != 0 + + if ctx.clientID != nil { + for _, topic := range request.Topics { + if *ctx.clientID == "test1" { + if topic.Topic != "allowed" { + return true, errors.New(fmt.Sprintf("Client %s is not allowed to produce to %s", *ctx.clientID, topic.Topic)) + } + } + } + } + case apiKeyFetch: + request := kmsg.NewPtrFetchRequest() + request.SetVersion(requestKeyVersion.ApiVersion) + + readBytes = make([]byte, requestKeyVersion.Length-4) + + _, err := io.ReadFull(src, readBytes) + if err != nil { + return false, + 
fmt.Errorf("could not read response: %w", err) + } + + reader := kbin.Reader{Src: readBytes} + reader.Uint32() // Correlation ID + reader.String() // client_id + + if request.IsFlexible() { + kmsg.ReadTags(&reader) + } + + err = request.ReadFrom(reader.Src) + if err != nil { + return false, + fmt.Errorf("could not read response: %w", err) + } + + if ctx.clientID != nil { + for _, topic := range request.Topics { + if *ctx.clientID == "test2" { + if topic.Topic != "allowed" { + return true, errors.New(fmt.Sprintf("Client %s is not allowed to produce to %s", *ctx.clientID, topic.Topic)) + } + } + } + } } // send inFlightRequest to channel before myCopyN to prevent race condition in proxyResponses diff --git a/vendor/github.com/twmb/franz-go/LICENSE b/vendor/github.com/twmb/franz-go/LICENSE new file mode 100644 index 0000000..36e1803 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/LICENSE @@ -0,0 +1,24 @@ +Copyright 2020, Travis Bischel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the library nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/twmb/franz-go/pkg/kbin/primitives.go b/vendor/github.com/twmb/franz-go/pkg/kbin/primitives.go new file mode 100644 index 0000000..487e7f6 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kbin/primitives.go @@ -0,0 +1,856 @@ +// Package kbin contains Kafka primitive reading and writing functions. +package kbin + +import ( + "encoding/binary" + "errors" + "math" + "math/bits" + "reflect" + "unsafe" +) + +// This file contains primitive type encoding and decoding. +// +// The Reader helper can be used even when content runs out +// or an error is hit; all other number requests will return +// zero so a decode will basically no-op. + +// ErrNotEnoughData is returned when a type could not fully decode +// from a slice because the slice did not have enough data. +var ErrNotEnoughData = errors.New("response did not contain enough data to be valid") + +// AppendBool appends 1 for true or 0 for false to dst. +func AppendBool(dst []byte, v bool) []byte { + if v { + return append(dst, 1) + } + return append(dst, 0) +} + +// AppendInt8 appends an int8 to dst. 
+func AppendInt8(dst []byte, i int8) []byte { + return append(dst, byte(i)) +} + +// AppendInt16 appends a big endian int16 to dst. +func AppendInt16(dst []byte, i int16) []byte { + return AppendUint16(dst, uint16(i)) +} + +// AppendUint16 appends a big endian uint16 to dst. +func AppendUint16(dst []byte, u uint16) []byte { + return append(dst, byte(u>>8), byte(u)) +} + +// AppendInt32 appends a big endian int32 to dst. +func AppendInt32(dst []byte, i int32) []byte { + return AppendUint32(dst, uint32(i)) +} + +// AppendInt64 appends a big endian int64 to dst. +func AppendInt64(dst []byte, i int64) []byte { + return appendUint64(dst, uint64(i)) +} + +// AppendFloat64 appends a big endian float64 to dst. +func AppendFloat64(dst []byte, f float64) []byte { + return appendUint64(dst, math.Float64bits(f)) +} + +// AppendUuid appends the 16 uuid bytes to dst. +func AppendUuid(dst []byte, uuid [16]byte) []byte { + return append(dst, uuid[:]...) +} + +func appendUint64(dst []byte, u uint64) []byte { + return append(dst, byte(u>>56), byte(u>>48), byte(u>>40), byte(u>>32), + byte(u>>24), byte(u>>16), byte(u>>8), byte(u)) +} + +// AppendUint32 appends a big endian uint32 to dst. +func AppendUint32(dst []byte, u uint32) []byte { + return append(dst, byte(u>>24), byte(u>>16), byte(u>>8), byte(u)) +} + +// uvarintLens could only be length 65, but using 256 allows bounds check +// elimination on lookup. +const uvarintLens = "\x01\x01\x01\x01\x01\x01\x01\x01\x02\x02\x02\x02\x02\x02\x02\x03\x03\x03\x03\x03\x03\x03\x04\x04\x04\x04\x04\x04\x04\x05\x05\x05\x05\x05\x05\x05\x06\x06\x06\x06\x06\x06\x06\x07\x07\x07\x07\x07\x07\x07\x08\x08\x08\x08\x08\x08\x08\x09\x09\x09\x09\x09\x09\x09\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + +// VarintLen returns how long i would be if it were varint encoded. +func VarintLen(i int32) int { + u := uint32(i)<<1 ^ uint32(i>>31) + return UvarintLen(u) +} + +// UvarintLen returns how long u would be if it were uvarint encoded. +func UvarintLen(u uint32) int { + return int(uvarintLens[byte(bits.Len32(u))]) +} + +// VarlongLen returns how long i would be if it were varlong encoded. +func VarlongLen(i int64) int { + u := uint64(i)<<1 ^ uint64(i>>63) + return uvarlongLen(u) +} + +func uvarlongLen(u uint64) int { + return int(uvarintLens[byte(bits.Len64(u))]) +} + +// Varint is a loop unrolled 32 bit varint decoder. The return semantics +// are the same as binary.Varint, with the added benefit that overflows +// in 5 byte encodings are handled rather than left to the user. +func Varint(in []byte) (int32, int) { + x, n := Uvarint(in) + return int32((x >> 1) ^ -(x & 1)), n +} + +// Uvarint is a loop unrolled 32 bit uvarint decoder. 
The return semantics +// are the same as binary.Uvarint, with the added benefit that overflows +// in 5 byte encodings are handled rather than left to the user. +func Uvarint(in []byte) (uint32, int) { + var x uint32 + var overflow int + + if len(in) < 1 { + goto fail + } + + x = uint32(in[0] & 0x7f) + if in[0]&0x80 == 0 { + return x, 1 + } else if len(in) < 2 { + goto fail + } + + x |= uint32(in[1]&0x7f) << 7 + if in[1]&0x80 == 0 { + return x, 2 + } else if len(in) < 3 { + goto fail + } + + x |= uint32(in[2]&0x7f) << 14 + if in[2]&0x80 == 0 { + return x, 3 + } else if len(in) < 4 { + goto fail + } + + x |= uint32(in[3]&0x7f) << 21 + if in[3]&0x80 == 0 { + return x, 4 + } else if len(in) < 5 { + goto fail + } + + x |= uint32(in[4]) << 28 + if in[4] <= 0x0f { + return x, 5 + } + + overflow = -5 + +fail: + return 0, overflow +} + +// Varlong is a loop unrolled 64 bit varint decoder. The return semantics +// are the same as binary.Varint, with the added benefit that overflows +// in 10 byte encodings are handled rather than left to the user. +func Varlong(in []byte) (int64, int) { + x, n := uvarlong(in) + return int64((x >> 1) ^ -(x & 1)), n +} + +func uvarlong(in []byte) (uint64, int) { + var x uint64 + var overflow int + + if len(in) < 1 { + goto fail + } + + x = uint64(in[0] & 0x7f) + if in[0]&0x80 == 0 { + return x, 1 + } else if len(in) < 2 { + goto fail + } + + x |= uint64(in[1]&0x7f) << 7 + if in[1]&0x80 == 0 { + return x, 2 + } else if len(in) < 3 { + goto fail + } + + x |= uint64(in[2]&0x7f) << 14 + if in[2]&0x80 == 0 { + return x, 3 + } else if len(in) < 4 { + goto fail + } + + x |= uint64(in[3]&0x7f) << 21 + if in[3]&0x80 == 0 { + return x, 4 + } else if len(in) < 5 { + goto fail + } + + x |= uint64(in[4]&0x7f) << 28 + if in[4]&0x80 == 0 { + return x, 5 + } else if len(in) < 6 { + goto fail + } + + x |= uint64(in[5]&0x7f) << 35 + if in[5]&0x80 == 0 { + return x, 6 + } else if len(in) < 7 { + goto fail + } + + x |= uint64(in[6]&0x7f) << 42 + if in[6]&0x80 == 0 { + return x, 7 + } else if len(in) < 8 { + goto fail + } + + x |= uint64(in[7]&0x7f) << 49 + if in[7]&0x80 == 0 { + return x, 8 + } else if len(in) < 9 { + goto fail + } + + x |= uint64(in[8]&0x7f) << 56 + if in[8]&0x80 == 0 { + return x, 9 + } else if len(in) < 10 { + goto fail + } + + x |= uint64(in[9]) << 63 + if in[9] <= 0x01 { + return x, 10 + } + + overflow = -10 + +fail: + return 0, overflow +} + +// AppendVarint appends a varint encoded i to dst. +func AppendVarint(dst []byte, i int32) []byte { + return AppendUvarint(dst, uint32(i)<<1^uint32(i>>31)) +} + +// AppendUvarint appends a uvarint encoded u to dst. +func AppendUvarint(dst []byte, u uint32) []byte { + switch UvarintLen(u) { + case 5: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte((u>>14)&0x7f|0x80), + byte((u>>21)&0x7f|0x80), + byte(u>>28)) + case 4: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte((u>>14)&0x7f|0x80), + byte(u>>21)) + case 3: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte(u>>14)) + case 2: + return append(dst, + byte(u&0x7f|0x80), + byte(u>>7)) + case 1: + return append(dst, byte(u)) + } + return dst +} + +// AppendVarlong appends a varint encoded i to dst. 
+func AppendVarlong(dst []byte, i int64) []byte { + return appendUvarlong(dst, uint64(i)<<1^uint64(i>>63)) +} + +func appendUvarlong(dst []byte, u uint64) []byte { + switch uvarlongLen(u) { + case 10: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte((u>>14)&0x7f|0x80), + byte((u>>21)&0x7f|0x80), + byte((u>>28)&0x7f|0x80), + byte((u>>35)&0x7f|0x80), + byte((u>>42)&0x7f|0x80), + byte((u>>49)&0x7f|0x80), + byte((u>>56)&0x7f|0x80), + byte(u>>63)) + case 9: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte((u>>14)&0x7f|0x80), + byte((u>>21)&0x7f|0x80), + byte((u>>28)&0x7f|0x80), + byte((u>>35)&0x7f|0x80), + byte((u>>42)&0x7f|0x80), + byte((u>>49)&0x7f|0x80), + byte(u>>56)) + case 8: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte((u>>14)&0x7f|0x80), + byte((u>>21)&0x7f|0x80), + byte((u>>28)&0x7f|0x80), + byte((u>>35)&0x7f|0x80), + byte((u>>42)&0x7f|0x80), + byte(u>>49)) + case 7: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte((u>>14)&0x7f|0x80), + byte((u>>21)&0x7f|0x80), + byte((u>>28)&0x7f|0x80), + byte((u>>35)&0x7f|0x80), + byte(u>>42)) + case 6: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte((u>>14)&0x7f|0x80), + byte((u>>21)&0x7f|0x80), + byte((u>>28)&0x7f|0x80), + byte(u>>35)) + case 5: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte((u>>14)&0x7f|0x80), + byte((u>>21)&0x7f|0x80), + byte(u>>28)) + case 4: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte((u>>14)&0x7f|0x80), + byte(u>>21)) + case 3: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte(u>>14)) + case 2: + return append(dst, + byte(u&0x7f|0x80), + byte(u>>7)) + case 1: + return append(dst, byte(u)) + } + return dst +} + +// AppendString appends a string to dst prefixed with its int16 length. +func AppendString(dst []byte, s string) []byte { + dst = AppendInt16(dst, int16(len(s))) + return append(dst, s...) +} + +// AppendCompactString appends a string to dst prefixed with its uvarint length +// starting at 1; 0 is reserved for null, which compact strings are not +// (nullable compact ones are!). Thus, the length is the decoded uvarint - 1. +// +// For KIP-482. +func AppendCompactString(dst []byte, s string) []byte { + dst = AppendUvarint(dst, 1+uint32(len(s))) + return append(dst, s...) +} + +// AppendNullableString appends potentially nil string to dst prefixed with its +// int16 length or int16(-1) if nil. +func AppendNullableString(dst []byte, s *string) []byte { + if s == nil { + return AppendInt16(dst, -1) + } + return AppendString(dst, *s) +} + +// AppendCompactNullableString appends a potentially nil string to dst with its +// uvarint length starting at 1, with 0 indicating null. Thus, the length is +// the decoded uvarint - 1. +// +// For KIP-482. +func AppendCompactNullableString(dst []byte, s *string) []byte { + if s == nil { + return AppendUvarint(dst, 0) + } + return AppendCompactString(dst, *s) +} + +// AppendBytes appends bytes to dst prefixed with its int32 length. +func AppendBytes(dst, b []byte) []byte { + dst = AppendInt32(dst, int32(len(b))) + return append(dst, b...) +} + +// AppendCompactBytes appends bytes to dst prefixed with a its uvarint length +// starting at 1; 0 is reserved for null, which compact bytes are not (nullable +// compact ones are!). Thus, the length is the decoded uvarint - 1. +// +// For KIP-482. 
+func AppendCompactBytes(dst, b []byte) []byte { + dst = AppendUvarint(dst, 1+uint32(len(b))) + return append(dst, b...) +} + +// AppendNullableBytes appends a potentially nil slice to dst prefixed with its +// int32 length or int32(-1) if nil. +func AppendNullableBytes(dst, b []byte) []byte { + if b == nil { + return AppendInt32(dst, -1) + } + return AppendBytes(dst, b) +} + +// AppendCompactNullableBytes appends a potentially nil slice to dst with its +// uvarint length starting at 1, with 0 indicating null. Thus, the length is +// the decoded uvarint - 1. +// +// For KIP-482. +func AppendCompactNullableBytes(dst, b []byte) []byte { + if b == nil { + return AppendUvarint(dst, 0) + } + return AppendCompactBytes(dst, b) +} + +// AppendVarintString appends a string to dst prefixed with its length encoded +// as a varint. +func AppendVarintString(dst []byte, s string) []byte { + dst = AppendVarint(dst, int32(len(s))) + return append(dst, s...) +} + +// AppendVarintBytes appends a slice to dst prefixed with its length encoded as +// a varint. +func AppendVarintBytes(dst, b []byte) []byte { + if b == nil { + return AppendVarint(dst, -1) + } + dst = AppendVarint(dst, int32(len(b))) + return append(dst, b...) +} + +// AppendArrayLen appends the length of an array as an int32 to dst. +func AppendArrayLen(dst []byte, l int) []byte { + return AppendInt32(dst, int32(l)) +} + +// AppendCompactArrayLen appends the length of an array as a uvarint to dst +// as the length + 1. +// +// For KIP-482. +func AppendCompactArrayLen(dst []byte, l int) []byte { + return AppendUvarint(dst, 1+uint32(l)) +} + +// AppendNullableArrayLen appends the length of an array as an int32 to dst, +// or -1 if isNil is true. +func AppendNullableArrayLen(dst []byte, l int, isNil bool) []byte { + if isNil { + return AppendInt32(dst, -1) + } + return AppendInt32(dst, int32(l)) +} + +// AppendCompactNullableArrayLen appends the length of an array as a uvarint to +// dst as the length + 1; if isNil is true, this appends 0 as a uvarint. +// +// For KIP-482. +func AppendCompactNullableArrayLen(dst []byte, l int, isNil bool) []byte { + if isNil { + return AppendUvarint(dst, 0) + } + return AppendUvarint(dst, 1+uint32(l)) +} + +// Reader is used to decode Kafka messages. +// +// For all functions on Reader, if the reader has been invalidated, functions +// return defaults (false, 0, nil, ""). Use Complete to detect if the reader +// was invalidated or if the reader has remaining data. +type Reader struct { + Src []byte + bad bool +} + +// Bool returns a bool from the reader. +func (b *Reader) Bool() bool { + if len(b.Src) < 1 { + b.bad = true + b.Src = nil + return false + } + t := b.Src[0] != 0 // if '0', false + b.Src = b.Src[1:] + return t +} + +// Int8 returns an int8 from the reader. +func (b *Reader) Int8() int8 { + if len(b.Src) < 1 { + b.bad = true + b.Src = nil + return 0 + } + r := b.Src[0] + b.Src = b.Src[1:] + return int8(r) +} + +// Int16 returns an int16 from the reader. +func (b *Reader) Int16() int16 { + if len(b.Src) < 2 { + b.bad = true + b.Src = nil + return 0 + } + r := int16(binary.BigEndian.Uint16(b.Src)) + b.Src = b.Src[2:] + return r +} + +// Uint16 returns an uint16 from the reader. +func (b *Reader) Uint16() uint16 { + if len(b.Src) < 2 { + b.bad = true + b.Src = nil + return 0 + } + r := binary.BigEndian.Uint16(b.Src) + b.Src = b.Src[2:] + return r +} + +// Int32 returns an int32 from the reader. 
+func (b *Reader) Int32() int32 { + if len(b.Src) < 4 { + b.bad = true + b.Src = nil + return 0 + } + r := int32(binary.BigEndian.Uint32(b.Src)) + b.Src = b.Src[4:] + return r +} + +// Int64 returns an int64 from the reader. +func (b *Reader) Int64() int64 { + return int64(b.readUint64()) +} + +// Uuid returns a uuid from the reader. +func (b *Reader) Uuid() [16]byte { + var r [16]byte + copy(r[:], b.Span(16)) + return r +} + +// Float64 returns a float64 from the reader. +func (b *Reader) Float64() float64 { + return math.Float64frombits(b.readUint64()) +} + +func (b *Reader) readUint64() uint64 { + if len(b.Src) < 8 { + b.bad = true + b.Src = nil + return 0 + } + r := binary.BigEndian.Uint64(b.Src) + b.Src = b.Src[8:] + return r +} + +// Uint32 returns a uint32 from the reader. +func (b *Reader) Uint32() uint32 { + if len(b.Src) < 4 { + b.bad = true + b.Src = nil + return 0 + } + r := binary.BigEndian.Uint32(b.Src) + b.Src = b.Src[4:] + return r +} + +// Varint returns a varint int32 from the reader. +func (b *Reader) Varint() int32 { + val, n := Varint(b.Src) + if n <= 0 { + b.bad = true + b.Src = nil + return 0 + } + b.Src = b.Src[n:] + return val +} + +// Varlong returns a varlong int64 from the reader. +func (b *Reader) Varlong() int64 { + val, n := Varlong(b.Src) + if n <= 0 { + b.bad = true + b.Src = nil + return 0 + } + b.Src = b.Src[n:] + return val +} + +// Uvarint returns a uvarint encoded uint32 from the reader. +func (b *Reader) Uvarint() uint32 { + val, n := Uvarint(b.Src) + if n <= 0 { + b.bad = true + b.Src = nil + return 0 + } + b.Src = b.Src[n:] + return val +} + +// Span returns l bytes from the reader. +func (b *Reader) Span(l int) []byte { + if len(b.Src) < l || l < 0 { + b.bad = true + b.Src = nil + return nil + } + r := b.Src[:l:l] + b.Src = b.Src[l:] + return r +} + +// UnsafeString returns a Kafka string from the reader without allocating using +// the unsafe package. This must be used with care; note the string holds a +// reference to the original slice. +func (b *Reader) UnsafeString() string { + l := b.Int16() + return UnsafeString(b.Span(int(l))) +} + +// String returns a Kafka string from the reader. +func (b *Reader) String() string { + l := b.Int16() + return string(b.Span(int(l))) +} + +// UnsafeCompactString returns a Kafka compact string from the reader without +// allocating using the unsafe package. This must be used with care; note the +// string holds a reference to the original slice. +func (b *Reader) UnsafeCompactString() string { + l := int(b.Uvarint()) - 1 + return UnsafeString(b.Span(l)) +} + +// CompactString returns a Kafka compact string from the reader. +func (b *Reader) CompactString() string { + l := int(b.Uvarint()) - 1 + return string(b.Span(l)) +} + +// UnsafeNullableString returns a Kafka nullable string from the reader without +// allocating using the unsafe package. This must be used with care; note the +// string holds a reference to the original slice. +func (b *Reader) UnsafeNullableString() *string { + l := b.Int16() + if l < 0 { + return nil + } + s := UnsafeString(b.Span(int(l))) + return &s +} + +// NullableString returns a Kafka nullable string from the reader. +func (b *Reader) NullableString() *string { + l := b.Int16() + if l < 0 { + return nil + } + s := string(b.Span(int(l))) + return &s +} + +// UnsafeCompactNullableString returns a Kafka compact nullable string from the +// reader without allocating using the unsafe package. This must be used with +// care; note the string holds a reference to the original slice. 
+func (b *Reader) UnsafeCompactNullableString() *string { + l := int(b.Uvarint()) - 1 + if l < 0 { + return nil + } + s := UnsafeString(b.Span(l)) + return &s +} + +// CompactNullableString returns a Kafka compact nullable string from the +// reader. +func (b *Reader) CompactNullableString() *string { + l := int(b.Uvarint()) - 1 + if l < 0 { + return nil + } + s := string(b.Span(l)) + return &s +} + +// Bytes returns a Kafka byte array from the reader. +// +// This never returns nil. +func (b *Reader) Bytes() []byte { + l := b.Int32() + // This is not to spec, but it is not clearly documented and Microsoft + // EventHubs fails here. -1 means null, which should throw an + // exception. EventHubs uses -1 to mean "does not exist" on some + // non-nullable fields. + // + // Until EventHubs is fixed, we return an empty byte slice for null. + if l == -1 { + return []byte{} + } + return b.Span(int(l)) +} + +// CompactBytes returns a Kafka compact byte array from the reader. +// +// This never returns nil. +func (b *Reader) CompactBytes() []byte { + l := int(b.Uvarint()) - 1 + if l == -1 { // same as above: -1 should not be allowed here + return []byte{} + } + return b.Span(l) +} + +// NullableBytes returns a Kafka nullable byte array from the reader, returning +// nil as appropriate. +func (b *Reader) NullableBytes() []byte { + l := b.Int32() + if l < 0 { + return nil + } + r := b.Span(int(l)) + return r +} + +// CompactNullableBytes returns a Kafka compact nullable byte array from the +// reader, returning nil as appropriate. +func (b *Reader) CompactNullableBytes() []byte { + l := int(b.Uvarint()) - 1 + if l < 0 { + return nil + } + r := b.Span(l) + return r +} + +// ArrayLen returns a Kafka array length from the reader. +func (b *Reader) ArrayLen() int32 { + r := b.Int32() + // The min size of a Kafka type is a byte, so if we do not have + // at least the array length of bytes left, it is bad. + if len(b.Src) < int(r) { + b.bad = true + b.Src = nil + return 0 + } + return r +} + +// VarintArrayLen returns a Kafka array length from the reader. +func (b *Reader) VarintArrayLen() int32 { + r := b.Varint() + // The min size of a Kafka type is a byte, so if we do not have + // at least the array length of bytes left, it is bad. + if len(b.Src) < int(r) { + b.bad = true + b.Src = nil + return 0 + } + return r +} + +// CompactArrayLen returns a Kafka compact array length from the reader. +func (b *Reader) CompactArrayLen() int32 { + r := int32(b.Uvarint()) - 1 + // The min size of a Kafka type is a byte, so if we do not have + // at least the array length of bytes left, it is bad. + if len(b.Src) < int(r) { + b.bad = true + b.Src = nil + return 0 + } + return r +} + +// VarintBytes returns a Kafka encoded varint array from the reader, returning +// nil as appropriate. +func (b *Reader) VarintBytes() []byte { + l := b.Varint() + if l < 0 { + return nil + } + return b.Span(int(l)) +} + +// UnsafeVarintString returns a Kafka encoded varint string from the reader +// without allocating using the unsafe package. This must be used with care; +// note the string holds a reference to the original slice. +func (b *Reader) UnsafeVarintString() string { + return UnsafeString(b.VarintBytes()) +} + +// VarintString returns a Kafka encoded varint string from the reader. +func (b *Reader) VarintString() string { + return string(b.VarintBytes()) +} + +// Complete returns ErrNotEnoughData if the source ran out while decoding. 
+func (b *Reader) Complete() error { + if b.bad { + return ErrNotEnoughData + } + return nil +} + +// Ok returns true if the reader is still ok. +func (b *Reader) Ok() bool { + return !b.bad +} + +// UnsafeString returns the slice as a string using unsafe rule (6). +func UnsafeString(slice []byte) string { + var str string + strhdr := (*reflect.StringHeader)(unsafe.Pointer(&str)) //nolint:gosec // known way to convert slice to string + strhdr.Data = ((*reflect.SliceHeader)(unsafe.Pointer(&slice))).Data //nolint:gosec // known way to convert slice to string + strhdr.Len = len(slice) + return str +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kmsg/LICENSE b/vendor/github.com/twmb/franz-go/pkg/kmsg/LICENSE new file mode 100644 index 0000000..36e1803 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kmsg/LICENSE @@ -0,0 +1,24 @@ +Copyright 2020, Travis Bischel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the library nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/twmb/franz-go/pkg/kmsg/api.go b/vendor/github.com/twmb/franz-go/pkg/kmsg/api.go new file mode 100644 index 0000000..6bda2e6 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kmsg/api.go @@ -0,0 +1,423 @@ +// Package kmsg contains Kafka request and response types and autogenerated +// serialization and deserialization functions. +// +// This package may bump major versions whenever Kafka makes a backwards +// incompatible protocol change, per the types chosen for this package. For +// example, Kafka can change a field from non-nullable to nullable, which would +// require changing a field from a non-pointer to a pointer. We could get +// around this by making everything an opaque struct and having getters, but +// that is more tedious than having a few rare major version bumps. +// +// If you are using this package directly with kgo, you should either always +// use New functions, or Default functions after creating structs, or you +// should pin the max supported version. If you use New functions, you will +// have safe defaults as new fields are added. If you pin versions, you will +// avoid new fields being used. 
If you do neither of these, you may opt in to +// new fields that do not have safe zero value defaults, and this may lead to +// errors or unexpected results. +// +// Thus, whenever you initialize a struct from this package, do the following: +// +// struct := kmsg.NewFoo() +// struct.Field = "value I want to set" +// +// Most of this package is generated, but a few things are manual. What is +// manual: all interfaces, the RequestFormatter, record / message / record +// batch reading, and sticky member metadata serialization. +package kmsg + +import ( + "context" + "sort" + + "github.com/twmb/franz-go/pkg/kmsg/internal/kbin" +) + +//go:generate cp ../kbin/primitives.go internal/kbin/ + +// Requestor issues requests. Notably, the kgo.Client and kgo.Broker implements +// Requestor. All Requests in this package have a RequestWith function to have +// type-safe requests. +type Requestor interface { + // Request issues a Request and returns either a Response or an error. + Request(context.Context, Request) (Response, error) +} + +// Request represents a type that can be requested to Kafka. +type Request interface { + // Key returns the protocol key for this message kind. + Key() int16 + // MaxVersion returns the maximum protocol version this message + // supports. + // + // This function allows one to implement a client that chooses message + // versions based off of the max of a message's max version in the + // client and the broker's max supported version. + MaxVersion() int16 + // SetVersion sets the version to use for this request and response. + SetVersion(int16) + // GetVersion returns the version currently set to use for the request + // and response. + GetVersion() int16 + // IsFlexible returns whether the request at its current version is + // "flexible" as per the KIP-482. + IsFlexible() bool + // AppendTo appends this message in wire protocol form to a slice and + // returns the slice. + AppendTo([]byte) []byte + // ReadFrom parses all of the input slice into the response type. + // + // This should return an error if too little data is input. + ReadFrom([]byte) error + // ResponseKind returns an empty Response that is expected for + // this message request. + ResponseKind() Response +} + +// AdminRequest represents a request that must be issued to Kafka controllers. +type AdminRequest interface { + // IsAdminRequest is a method attached to requests that must be + // issed to Kafka controllers. + IsAdminRequest() + Request +} + +// GroupCoordinatorRequest represents a request that must be issued to a +// group coordinator. +type GroupCoordinatorRequest interface { + // IsGroupCoordinatorRequest is a method attached to requests that + // must be issued to group coordinators. + IsGroupCoordinatorRequest() + Request +} + +// TxnCoordinatorRequest represents a request that must be issued to a +// transaction coordinator. +type TxnCoordinatorRequest interface { + // IsTxnCoordinatorRequest is a method attached to requests that + // must be issued to transaction coordinators. + IsTxnCoordinatorRequest() + Request +} + +// Response represents a type that Kafka responds with. +type Response interface { + // Key returns the protocol key for this message kind. + Key() int16 + // MaxVersion returns the maximum protocol version this message + // supports. + MaxVersion() int16 + // SetVersion sets the version to use for this request and response. + SetVersion(int16) + // GetVersion returns the version currently set to use for the request + // and response. 
+ GetVersion() int16 + // IsFlexible returns whether the request at its current version is + // "flexible" as per the KIP-482. + IsFlexible() bool + // AppendTo appends this message in wire protocol form to a slice and + // returns the slice. + AppendTo([]byte) []byte + // ReadFrom parses all of the input slice into the response type. + // + // This should return an error if too little data is input. + ReadFrom([]byte) error + // RequestKind returns an empty Request that is expected for + // this message request. + RequestKind() Request +} + +// UnsafeReadFrom, implemented by all requests and responses generated in this +// package, switches to using unsafe slice-to-string conversions when reading. +// This can be used to avoid a lot of garbage, but it means to have to be +// careful when using any strings in structs: if you hold onto the string, the +// underlying response slice will not be garbage collected. +type UnsafeReadFrom interface { + UnsafeReadFrom([]byte) error +} + +// ThrottleResponse represents a response that could have a throttle applied by +// Kafka. Any response that implements ThrottleResponse also implements +// SetThrottleResponse. +// +// Kafka 2.0.0 switched throttles from being applied before responses to being +// applied after responses. +type ThrottleResponse interface { + // Throttle returns the response's throttle millis value and + // whether Kafka applies the throttle after the response. + Throttle() (int32, bool) +} + +// SetThrottleResponse sets the throttle in a response that can have a throttle +// applied. Any kmsg interface that implements ThrottleResponse also implements +// SetThrottleResponse. +type SetThrottleResponse interface { + // SetThrottle sets the response's throttle millis value. + SetThrottle(int32) +} + +// TimeoutRequest represents a request that has a TimeoutMillis field. +// Any request that implements TimeoutRequest also implements SetTimeoutRequest. +type TimeoutRequest interface { + // Timeout returns the request's timeout millis value. + Timeout() int32 +} + +// SetTimeoutRequest sets the timeout in a request that can have a timeout +// applied. Any kmsg interface that implements ThrottleRequest also implements +// SetThrottleRequest. +type SetTimeoutRequest interface { + // SetTimeout sets the request's timeout millis value. + SetTimeout(timeoutMillis int32) +} + +// RequestFormatter formats requests. +// +// The default empty struct works correctly, but can be extended with the +// NewRequestFormatter function. +type RequestFormatter struct { + clientID *string +} + +// RequestFormatterOpt applys options to a RequestFormatter. +type RequestFormatterOpt interface { + apply(*RequestFormatter) +} + +type formatterOpt struct{ fn func(*RequestFormatter) } + +func (opt formatterOpt) apply(f *RequestFormatter) { opt.fn(f) } + +// FormatterClientID attaches the given client ID to any issued request, +// minus controlled shutdown v0, which uses its own special format. +func FormatterClientID(id string) RequestFormatterOpt { + return formatterOpt{func(f *RequestFormatter) { f.clientID = &id }} +} + +// NewRequestFormatter returns a RequestFormatter with the opts applied. +func NewRequestFormatter(opts ...RequestFormatterOpt) *RequestFormatter { + a := new(RequestFormatter) + for _, opt := range opts { + opt.apply(a) + } + return a +} + +// AppendRequest appends a full message request to dst, returning the updated +// slice. This message is the full body that needs to be written to issue a +// Kafka request. 
+func (f *RequestFormatter) AppendRequest( + dst []byte, + r Request, + correlationID int32, +) []byte { + dst = append(dst, 0, 0, 0, 0) // reserve length + k := r.Key() + v := r.GetVersion() + dst = kbin.AppendInt16(dst, k) + dst = kbin.AppendInt16(dst, v) + dst = kbin.AppendInt32(dst, correlationID) + if k == 7 && v == 0 { + return dst + } + + // Even with flexible versions, we do not use a compact client id. + // Clients issue ApiVersions immediately before knowing the broker + // version, and old brokers will not be able to understand a compact + // client id. + dst = kbin.AppendNullableString(dst, f.clientID) + + // The flexible tags end the request header, and then begins the + // request body. + if r.IsFlexible() { + var numTags uint8 + dst = append(dst, numTags) + if numTags != 0 { + // TODO when tags are added + } + } + + // Now the request body. + dst = r.AppendTo(dst) + + kbin.AppendInt32(dst[:0], int32(len(dst[4:]))) + return dst +} + +// StringPtr is a helper to return a pointer to a string. +func StringPtr(in string) *string { + return &in +} + +// ReadFrom provides decoding various versions of sticky member metadata. A key +// point of this type is that it does not contain a version number inside it, +// but it is versioned: if decoding v1 fails, this falls back to v0. +func (s *StickyMemberMetadata) ReadFrom(src []byte) error { + return s.readFrom(src, false) +} + +// UnsafeReadFrom is the same as ReadFrom, but uses unsafe slice to string +// conversions to reduce garbage. +func (s *StickyMemberMetadata) UnsafeReadFrom(src []byte) error { + return s.readFrom(src, true) +} + +func (s *StickyMemberMetadata) readFrom(src []byte, unsafe bool) error { + b := kbin.Reader{Src: src} + numAssignments := b.ArrayLen() + if numAssignments < 0 { + numAssignments = 0 + } + need := numAssignments - int32(cap(s.CurrentAssignment)) + if need > 0 { + s.CurrentAssignment = append(s.CurrentAssignment[:cap(s.CurrentAssignment)], make([]StickyMemberMetadataCurrentAssignment, need)...) + } else { + s.CurrentAssignment = s.CurrentAssignment[:numAssignments] + } + for i := int32(0); i < numAssignments; i++ { + var topic string + if unsafe { + topic = b.UnsafeString() + } else { + topic = b.String() + } + numPartitions := b.ArrayLen() + if numPartitions < 0 { + numPartitions = 0 + } + a := &s.CurrentAssignment[i] + a.Topic = topic + need := numPartitions - int32(cap(a.Partitions)) + if need > 0 { + a.Partitions = append(a.Partitions[:cap(a.Partitions)], make([]int32, need)...) + } else { + a.Partitions = a.Partitions[:numPartitions] + } + for i := range a.Partitions { + a.Partitions[i] = b.Int32() + } + } + if len(b.Src) > 0 { + s.Generation = b.Int32() + } else { + s.Generation = -1 + } + return b.Complete() +} + +// AppendTo provides appending various versions of sticky member metadata to dst. +// If generation is not -1 (default for v0), this appends as version 1. +func (s *StickyMemberMetadata) AppendTo(dst []byte) []byte { + dst = kbin.AppendArrayLen(dst, len(s.CurrentAssignment)) + for _, assignment := range s.CurrentAssignment { + dst = kbin.AppendString(dst, assignment.Topic) + dst = kbin.AppendArrayLen(dst, len(assignment.Partitions)) + for _, partition := range assignment.Partitions { + dst = kbin.AppendInt32(dst, partition) + } + } + if s.Generation != -1 { + dst = kbin.AppendInt32(dst, s.Generation) + } + return dst +} + +// TagReader has is a type that has the ability to skip tags. 
+// +// This is effectively a trimmed version of the kbin.Reader, with the purpose +// being that kmsg cannot depend on an external package. +type TagReader interface { + // Uvarint returns a uint32. If the reader has read too much and has + // exhausted all bytes, this should set the reader's internal state + // to failed and return 0. + Uvarint() uint32 + + // Span returns n bytes from the reader. If the reader has read too + // much and exhausted all bytes this should set the reader's internal + // to failed and return nil. + Span(n int) []byte +} + +// SkipTags skips tags in a TagReader. +func SkipTags(b TagReader) { + for num := b.Uvarint(); num > 0; num-- { + _, size := b.Uvarint(), b.Uvarint() + b.Span(int(size)) + } +} + +// internalSkipTags skips tags in the duplicated inner kbin.Reader. +func internalSkipTags(b *kbin.Reader) { + for num := b.Uvarint(); num > 0; num-- { + _, size := b.Uvarint(), b.Uvarint() + b.Span(int(size)) + } +} + +// ReadTags reads tags in a TagReader and returns the tags. +func ReadTags(b TagReader) Tags { + var t Tags + for num := b.Uvarint(); num > 0; num-- { + key, size := b.Uvarint(), b.Uvarint() + t.Set(key, b.Span(int(size))) + } + return t +} + +// internalReadTags reads tags in a reader and returns the tags from a +// duplicated inner kbin.Reader. +func internalReadTags(b *kbin.Reader) Tags { + var t Tags + for num := b.Uvarint(); num > 0; num-- { + key, size := b.Uvarint(), b.Uvarint() + t.Set(key, b.Span(int(size))) + } + return t +} + +// Tags is an opaque structure capturing unparsed tags. +type Tags struct { + keyvals map[uint32][]byte +} + +// Len returns the number of keyvals in Tags. +func (t *Tags) Len() int { return len(t.keyvals) } + +// Each calls fn for each key and val in the tags. +func (t *Tags) Each(fn func(uint32, []byte)) { + if len(t.keyvals) == 0 { + return + } + // We must encode keys in order. We expect to have limited (no) unknown + // keys, so for now, we take a lazy approach and allocate an ordered + // slice. + ordered := make([]uint32, 0, len(t.keyvals)) + for key := range t.keyvals { + ordered = append(ordered, key) + } + sort.Slice(ordered, func(i, j int) bool { return ordered[i] < ordered[j] }) + for _, key := range ordered { + fn(key, t.keyvals[key]) + } +} + +// Set sets a tag's key and val. +// +// Note that serializing tags does NOT check if the set key overlaps with an +// existing used key. It is invalid to set a key used by Kafka itself. +func (t *Tags) Set(key uint32, val []byte) { + if t.keyvals == nil { + t.keyvals = make(map[uint32][]byte) + } + t.keyvals[key] = val +} + +// AppendEach appends each keyval in tags to dst and returns the updated dst. +func (t *Tags) AppendEach(dst []byte) []byte { + t.Each(func(key uint32, val []byte) { + dst = kbin.AppendUvarint(dst, key) + dst = kbin.AppendUvarint(dst, uint32(len(val))) + dst = append(dst, val...) + }) + return dst +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kmsg/generated.go b/vendor/github.com/twmb/franz-go/pkg/kmsg/generated.go new file mode 100644 index 0000000..75bff99 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kmsg/generated.go @@ -0,0 +1,46895 @@ +package kmsg + +import ( + "context" + "fmt" + "reflect" + "strings" + + "github.com/twmb/franz-go/pkg/kmsg/internal/kbin" +) + +// Code generated by franz-go/generate. DO NOT EDIT. + +// MaxKey is the maximum key used for any messages in this package. +// Note that this value will change as Kafka adds more messages. 
+const MaxKey = 68 + +// MessageV0 is the message format Kafka used prior to 0.10. +// +// To produce or fetch messages, Kafka would write many messages contiguously +// as an array without specifying the array length. +type MessageV0 struct { + // Offset is the offset of this record. + // + // If this is the outer message of a recursive message set (i.e. a + // message set has been compressed and this is the outer message), + // then the offset should be the offset of the last inner value. + Offset int64 + + // MessageSize is the size of everything that follows in this message. + MessageSize int32 + + // CRC is the crc of everything that follows this field (NOT using the + // Castagnoli polynomial, as is the case in the 0.11+ RecordBatch). + CRC int32 + + // Magic is 0. + Magic int8 + + // Attributes describe the attributes of this message. + // + // The first three bits correspond to compression: + // - 00 is no compression + // - 01 is gzip compression + // - 10 is snappy compression + // + // The remaining bits are unused and must be 0. + Attributes int8 + + // Key is an blob of data for a record. + // + // Key's are usually used for hashing the record to specific Kafka partitions. + Key []byte + + // Value is a blob of data. This field is the main "message" portion of a + // record. + Value []byte +} + +func (v *MessageV0) AppendTo(dst []byte) []byte { + { + v := v.Offset + dst = kbin.AppendInt64(dst, v) + } + { + v := v.MessageSize + dst = kbin.AppendInt32(dst, v) + } + { + v := v.CRC + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Magic + dst = kbin.AppendInt8(dst, v) + } + { + v := v.Attributes + dst = kbin.AppendInt8(dst, v) + } + { + v := v.Key + dst = kbin.AppendNullableBytes(dst, v) + } + { + v := v.Value + dst = kbin.AppendNullableBytes(dst, v) + } + return dst +} + +func (v *MessageV0) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *MessageV0) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *MessageV0) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + s := v + { + v := b.Int64() + s.Offset = v + } + { + v := b.Int32() + s.MessageSize = v + } + { + v := b.Int32() + s.CRC = v + } + { + v := b.Int8() + s.Magic = v + } + { + v := b.Int8() + s.Attributes = v + } + { + v := b.NullableBytes() + s.Key = v + } + { + v := b.NullableBytes() + s.Value = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to MessageV0. +func (v *MessageV0) Default() { +} + +// NewMessageV0 returns a default MessageV0 +// This is a shortcut for creating a struct and calling Default yourself. +func NewMessageV0() MessageV0 { + var v MessageV0 + v.Default() + return v +} + +// MessageV1 is the message format Kafka used prior to 0.11. +// +// To produce or fetch messages, Kafka would write many messages contiguously +// as an array without specifying the array length. +// +// To support compression, an entire message set would be compressed and used +// as the Value in another message set (thus being "recursive"). The key for +// this outer message set must be null. +type MessageV1 struct { + // Offset is the offset of this record. + // + // Different from v0, if this message set is a recursive message set + // (that is, compressed and inside another message set), the offset + // on the inner set is relative to the offset of the outer set. 
+ Offset int64 + + // MessageSize is the size of everything that follows in this message. + MessageSize int32 + + // CRC is the crc of everything that follows this field (NOT using the + // Castagnoli polynomial, as is the case in the 0.11+ RecordBatch). + CRC int32 + + // Magic is 1. + Magic int8 + + // Attributes describe the attributes of this message. + // + // The first three bits correspond to compression: + // - 00 is no compression + // - 01 is gzip compression + // - 10 is snappy compression + // + // Bit 4 is the timestamp type, with 0 meaning CreateTime corresponding + // to the timestamp being from the producer, and 1 meaning LogAppendTime + // corresponding to the timestamp being from the broker. + // Setting this to LogAppendTime will cause batches to be rejected. + // + // The remaining bits are unused and must be 0. + Attributes int8 + + // Timestamp is the millisecond timestamp of this message. + Timestamp int64 + + // Key is an blob of data for a record. + // + // Key's are usually used for hashing the record to specific Kafka partitions. + Key []byte + + // Value is a blob of data. This field is the main "message" portion of a + // record. + Value []byte +} + +func (v *MessageV1) AppendTo(dst []byte) []byte { + { + v := v.Offset + dst = kbin.AppendInt64(dst, v) + } + { + v := v.MessageSize + dst = kbin.AppendInt32(dst, v) + } + { + v := v.CRC + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Magic + dst = kbin.AppendInt8(dst, v) + } + { + v := v.Attributes + dst = kbin.AppendInt8(dst, v) + } + { + v := v.Timestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.Key + dst = kbin.AppendNullableBytes(dst, v) + } + { + v := v.Value + dst = kbin.AppendNullableBytes(dst, v) + } + return dst +} + +func (v *MessageV1) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *MessageV1) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *MessageV1) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + s := v + { + v := b.Int64() + s.Offset = v + } + { + v := b.Int32() + s.MessageSize = v + } + { + v := b.Int32() + s.CRC = v + } + { + v := b.Int8() + s.Magic = v + } + { + v := b.Int8() + s.Attributes = v + } + { + v := b.Int64() + s.Timestamp = v + } + { + v := b.NullableBytes() + s.Key = v + } + { + v := b.NullableBytes() + s.Value = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to MessageV1. +func (v *MessageV1) Default() { +} + +// NewMessageV1 returns a default MessageV1 +// This is a shortcut for creating a struct and calling Default yourself. +func NewMessageV1() MessageV1 { + var v MessageV1 + v.Default() + return v +} + +// Header is user provided metadata for a record. Kafka does not look at +// headers at all; they are solely for producers and consumers. 
+type Header struct { + Key string + + Value []byte +} + +func (v *Header) AppendTo(dst []byte) []byte { + { + v := v.Key + dst = kbin.AppendVarintString(dst, v) + } + { + v := v.Value + dst = kbin.AppendVarintBytes(dst, v) + } + return dst +} + +func (v *Header) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *Header) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *Header) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + s := v + { + var v string + if unsafe { + v = b.UnsafeVarintString() + } else { + v = b.VarintString() + } + s.Key = v + } + { + v := b.VarintBytes() + s.Value = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to Header. +func (v *Header) Default() { +} + +// NewHeader returns a default Header +// This is a shortcut for creating a struct and calling Default yourself. +func NewHeader() Header { + var v Header + v.Default() + return v +} + +// RecordBatch is a Kafka concept that groups many individual records together +// in a more optimized format. +type RecordBatch struct { + // FirstOffset is the first offset in a record batch. + // + // For producing, this is usually 0. + FirstOffset int64 + + // Length is the wire length of everything that follows this field. + Length int32 + + // PartitionLeaderEpoch is the leader epoch of the broker at the time + // this batch was written. Kafka uses this for cluster communication, + // but clients can also use this to better aid truncation detection. + // See KIP-320. Producers should set this to -1. + PartitionLeaderEpoch int32 + + // Magic is the current "magic" number of this message format. + // The current magic number is 2. + Magic int8 + + // CRC is the crc of everything that follows this field using the + // Castagnoli polynomial. + CRC int32 + + // Attributes describe the records array of this batch. + // + // The first three bits correspond to compression: + // - 000 is no compression + // - 001 is gzip compression + // - 010 is snappy compression + // - 011 is lz4 compression + // - 100 is zstd compression (produce request version 7+) + // + // Bit 4 is the timestamp type, with 0 meaning CreateTime corresponding + // to the timestamp being from the producer, and 1 meaning LogAppendTime + // corresponding to the timestamp being from the broker. + // Setting this to LogAppendTime will cause batches to be rejected. + // + // Bit 5 indicates whether the batch is part of a transaction (1 is yes). + // + // Bit 6 indicates if the batch includes a control message (1 is yes). + // Control messages are used to enable transactions and are generated from + // the broker. Clients should not return control batches to applications. + Attributes int16 + + // LastOffsetDelta is the offset of the last message in a batch. This is used + // by the broker to ensure correct behavior even with batch compaction. + LastOffsetDelta int32 + + // FirstTimestamp is the timestamp (in milliseconds) of the first record + // in a batch. + FirstTimestamp int64 + + // MaxTimestamp is the timestamp (in milliseconds) of the last record + // in a batch. Similar to LastOffsetDelta, this is used to ensure correct + // behavior with compacting. + MaxTimestamp int64 + + // ProducerID is the broker assigned producerID from an InitProducerID + // request. + // + // Clients that wish to support idempotent messages and transactions must + // set this field. 
+ // + // Note that when not using transactions, any producer here is always + // accepted (and the epoch is always zero). Outside transactions, the ID + // is used only to deduplicate requests (and there must be at max 5 + // concurrent requests). + ProducerID int64 + + // ProducerEpoch is the broker assigned producerEpoch from an InitProducerID + // request. + // + // Clients that wish to support idempotent messages and transactions must + // set this field. + ProducerEpoch int16 + + // FirstSequence is the producer assigned sequence number used by the + // broker to deduplicate messages. + // + // Clients that wish to support idempotent messages and transactions must + // set this field. + // + // The sequence number for each record in a batch is OffsetDelta + FirstSequence. + FirstSequence int32 + + // NumRecords is the number of records in the array below. + // + // This is separate from Records due to the potential for records to be + // compressed. + NumRecords int32 + + // Records contains records, either compressed or uncompressed. + // + // For uncompressed records, this is an array of records ([Record]). + // + // For compressed records, the length of the uncompressed array is kept + // but everything that follows is compressed. + // + // The number of bytes is expected to be the Length field minus 49. + Records []byte +} + +func (v *RecordBatch) AppendTo(dst []byte) []byte { + { + v := v.FirstOffset + dst = kbin.AppendInt64(dst, v) + } + { + v := v.Length + dst = kbin.AppendInt32(dst, v) + } + { + v := v.PartitionLeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Magic + dst = kbin.AppendInt8(dst, v) + } + { + v := v.CRC + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Attributes + dst = kbin.AppendInt16(dst, v) + } + { + v := v.LastOffsetDelta + dst = kbin.AppendInt32(dst, v) + } + { + v := v.FirstTimestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.MaxTimestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ProducerEpoch + dst = kbin.AppendInt16(dst, v) + } + { + v := v.FirstSequence + dst = kbin.AppendInt32(dst, v) + } + { + v := v.NumRecords + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Records + dst = append(dst, v...) + } + return dst +} + +func (v *RecordBatch) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *RecordBatch) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *RecordBatch) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + s := v + { + v := b.Int64() + s.FirstOffset = v + } + { + v := b.Int32() + s.Length = v + } + { + v := b.Int32() + s.PartitionLeaderEpoch = v + } + { + v := b.Int8() + s.Magic = v + } + { + v := b.Int32() + s.CRC = v + } + { + v := b.Int16() + s.Attributes = v + } + { + v := b.Int32() + s.LastOffsetDelta = v + } + { + v := b.Int64() + s.FirstTimestamp = v + } + { + v := b.Int64() + s.MaxTimestamp = v + } + { + v := b.Int64() + s.ProducerID = v + } + { + v := b.Int16() + s.ProducerEpoch = v + } + { + v := b.Int32() + s.FirstSequence = v + } + { + v := b.Int32() + s.NumRecords = v + } + { + v := b.Span(int(s.Length) - 49) + s.Records = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to RecordBatch. +func (v *RecordBatch) Default() { +} + +// NewRecordBatch returns a default RecordBatch +// This is a shortcut for creating a struct and calling Default yourself. 
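As an illustrative aside (not part of the generated code), the RecordBatch reader above can be used to peek at a batch header without decoding the individual records. The sketch below assumes these definitions are consumed through the kmsg package import and that the raw slice is a partition's Records payload in message format v2 (Kafka 0.11+); it pulls the compression codec and transactional flag out of the Attributes bits described earlier.

package main

import (
	"fmt"

	"github.com/twmb/franz-go/pkg/kmsg"
)

// inspectBatch decodes only the RecordBatch header of a raw Records payload.
func inspectBatch(raw []byte) error {
	var batch kmsg.RecordBatch
	if err := batch.ReadFrom(raw); err != nil {
		return err
	}
	// Low three bits of Attributes are the codec: 0 none, 1 gzip, 2 snappy,
	// 3 lz4, 4 zstd. The transactional flag is the bit above the timestamp
	// type bit (mask 0x10), per the field documentation above.
	codec := batch.Attributes & 0x07
	fmt.Printf("records=%d codec=%d transactional=%t\n",
		batch.NumRecords, codec, batch.Attributes&0x10 != 0)
	return nil
}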
+func NewRecordBatch() RecordBatch { + var v RecordBatch + v.Default() + return v +} + +// OffsetCommitKey is the key for the Kafka internal __consumer_offsets topic +// if the key starts with an int16 with a value of 0 or 1. +// +// This type was introduced in KAFKA-1012 commit a670537aa3 with release 0.8.2 +// and has been in use ever since. +type OffsetCommitKey struct { + // Version is which encoding version this value is using. + Version int16 + + // Group is the group being committed. + Group string + + // Topic is the topic being committed. + Topic string + + // Partition is the partition being committed. + Partition int32 +} + +func (v *OffsetCommitKey) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.Version + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Group + dst = kbin.AppendString(dst, v) + } + { + v := v.Topic + dst = kbin.AppendString(dst, v) + } + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + return dst +} + +func (v *OffsetCommitKey) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *OffsetCommitKey) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *OffsetCommitKey) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + v.Version = b.Int16() + version := v.Version + _ = version + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Group = v + } + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Topic = v + } + { + v := b.Int32() + s.Partition = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetCommitKey. +func (v *OffsetCommitKey) Default() { +} + +// NewOffsetCommitKey returns a default OffsetCommitKey +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetCommitKey() OffsetCommitKey { + var v OffsetCommitKey + v.Default() + return v +} + +// OffsetCommitValue is the value for the Kafka internal __consumer_offsets +// topic if the key is of OffsetCommitKey type. +// +// Version 0 was introduced with the key version 0. +// +// KAFKA-1634 commit c5df2a8e3a in 0.9.0 released version 1. +// +// KAFKA-4682 commit 418a91b5d4, proposed in KIP-211 and included in 2.1.0 +// released version 2. +// +// KAFKA-7437 commit 9f7267dd2f, proposed in KIP-320 and included in 2.1.0 +// released version 3. +type OffsetCommitValue struct { + // Version is which encoding version this value is using. + Version int16 + + // Offset is the committed offset. + Offset int64 + + // LeaderEpoch is the epoch of the leader committing this message. + LeaderEpoch int32 // v3+ + + // Metadata is the metadata included in the commit. + Metadata string + + // CommitTimestamp is when this commit occurred. + CommitTimestamp int64 + + // ExpireTimestamp, introduced in v1 and dropped in v2 with KIP-111, + // is when this commit expires. 
+ ExpireTimestamp int64 // v1-v1 +} + +func (v *OffsetCommitValue) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.Version + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Offset + dst = kbin.AppendInt64(dst, v) + } + if version >= 3 { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Metadata + dst = kbin.AppendString(dst, v) + } + { + v := v.CommitTimestamp + dst = kbin.AppendInt64(dst, v) + } + if version >= 1 && version <= 1 { + v := v.ExpireTimestamp + dst = kbin.AppendInt64(dst, v) + } + return dst +} + +func (v *OffsetCommitValue) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *OffsetCommitValue) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *OffsetCommitValue) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + v.Version = b.Int16() + version := v.Version + _ = version + s := v + { + v := b.Int64() + s.Offset = v + } + if version >= 3 { + v := b.Int32() + s.LeaderEpoch = v + } + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Metadata = v + } + { + v := b.Int64() + s.CommitTimestamp = v + } + if version >= 1 && version <= 1 { + v := b.Int64() + s.ExpireTimestamp = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetCommitValue. +func (v *OffsetCommitValue) Default() { +} + +// NewOffsetCommitValue returns a default OffsetCommitValue +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetCommitValue() OffsetCommitValue { + var v OffsetCommitValue + v.Default() + return v +} + +// GroupMetadataKey is the key for the Kafka internal __consumer_offsets topic +// if the key starts with an int16 with a value of 2. +// +// This type was introduced in KAFKA-2017 commit 7c33475274 with release 0.9.0 +// and has been in use ever since. +type GroupMetadataKey struct { + // Version is which encoding version this value is using. + Version int16 + + // Group is the group this metadata is for. + Group string +} + +func (v *GroupMetadataKey) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.Version + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Group + dst = kbin.AppendString(dst, v) + } + return dst +} + +func (v *GroupMetadataKey) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *GroupMetadataKey) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *GroupMetadataKey) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + v.Version = b.Int16() + version := v.Version + _ = version + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Group = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to GroupMetadataKey. +func (v *GroupMetadataKey) Default() { +} + +// NewGroupMetadataKey returns a default GroupMetadataKey +// This is a shortcut for creating a struct and calling Default yourself. +func NewGroupMetadataKey() GroupMetadataKey { + var v GroupMetadataKey + v.Default() + return v +} + +type GroupMetadataValueMember struct { + // MemberID is a group member. + MemberID string + + // InstanceID is the instance ID of this member in the group (KIP-345). 
+ InstanceID *string // v3+ + + // ClientID is the client ID of this group member. + ClientID string + + // ClientHost is the hostname of this group member. + ClientHost string + + // RebalanceTimeoutMillis is the rebalance timeout of this group member. + RebalanceTimeoutMillis int32 // v1+ + + // SessionTimeoutMillis is the session timeout of this group member. + SessionTimeoutMillis int32 + + // Subscription is the subscription of this group member. + Subscription []byte + + // Assignment is what the leader assigned this group member. + Assignment []byte +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to GroupMetadataValueMember. +func (v *GroupMetadataValueMember) Default() { +} + +// NewGroupMetadataValueMember returns a default GroupMetadataValueMember +// This is a shortcut for creating a struct and calling Default yourself. +func NewGroupMetadataValueMember() GroupMetadataValueMember { + var v GroupMetadataValueMember + v.Default() + return v +} + +// GroupMetadataValue is the value for the Kafka internal __consumer_offsets +// topic if the key is of GroupMetadataKey type. +// +// Version 0 was introduced with the key version 0. +// +// KAFKA-3888 commit 40b1dd3f49, proposed in KIP-62 and included in 0.10.1 +// released version 1. +// +// KAFKA-4682 commit 418a91b5d4, proposed in KIP-211 and included in 2.1.0 +// released version 2. +// +// KAFKA-7862 commit 0f995ba6be, proposed in KIP-345 and included in 2.3.0 +// released version 3. +type GroupMetadataValue struct { + // Version is the version of this value. + Version int16 + + // ProtocolType is the type of protocol being used for the group + // (i.e., "consumer"). + ProtocolType string + + // Generation is the generation of this group. + Generation int32 + + // Protocol is the agreed upon protocol all members are using to partition + // (i.e., "sticky"). + Protocol *string + + // Leader is the group leader. + Leader *string + + // CurrentStateTimestamp is the timestamp for this state of the group + // (stable, etc.). + CurrentStateTimestamp int64 // v2+ + + // Members are the group members. 
+ Members []GroupMetadataValueMember +} + +func (v *GroupMetadataValue) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.Version + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ProtocolType + dst = kbin.AppendString(dst, v) + } + { + v := v.Generation + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Protocol + dst = kbin.AppendNullableString(dst, v) + } + { + v := v.Leader + dst = kbin.AppendNullableString(dst, v) + } + if version >= 2 { + v := v.CurrentStateTimestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.Members + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.MemberID + dst = kbin.AppendString(dst, v) + } + if version >= 3 { + v := v.InstanceID + dst = kbin.AppendNullableString(dst, v) + } + { + v := v.ClientID + dst = kbin.AppendString(dst, v) + } + { + v := v.ClientHost + dst = kbin.AppendString(dst, v) + } + if version >= 1 { + v := v.RebalanceTimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.SessionTimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Subscription + dst = kbin.AppendBytes(dst, v) + } + { + v := v.Assignment + dst = kbin.AppendBytes(dst, v) + } + } + } + return dst +} + +func (v *GroupMetadataValue) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *GroupMetadataValue) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *GroupMetadataValue) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + v.Version = b.Int16() + version := v.Version + _ = version + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.ProtocolType = v + } + { + v := b.Int32() + s.Generation = v + } + { + var v *string + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + s.Protocol = v + } + { + var v *string + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + s.Leader = v + } + if version >= 2 { + v := b.Int64() + s.CurrentStateTimestamp = v + } + { + v := s.Members + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]GroupMetadataValueMember, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.MemberID = v + } + if version >= 3 { + var v *string + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + s.InstanceID = v + } + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.ClientID = v + } + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.ClientHost = v + } + if version >= 1 { + v := b.Int32() + s.RebalanceTimeoutMillis = v + } + { + v := b.Int32() + s.SessionTimeoutMillis = v + } + { + v := b.Bytes() + s.Subscription = v + } + { + v := b.Bytes() + s.Assignment = v + } + } + v = a + s.Members = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to GroupMetadataValue. +func (v *GroupMetadataValue) Default() { +} + +// NewGroupMetadataValue returns a default GroupMetadataValue +// This is a shortcut for creating a struct and calling Default yourself. 
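The key and value types above are what actually lands in the internal __consumer_offsets topic. A minimal, illustrative sketch (assuming the types are used via the kmsg package) of dispatching on the key's int16 version prefix, 0 or 1 for offset commits and 2 for group metadata, as documented above:

package main

import (
	"fmt"

	"github.com/twmb/franz-go/pkg/kbin"
	"github.com/twmb/franz-go/pkg/kmsg"
)

// decodeOffsetsRecord decodes one __consumer_offsets key/value pair.
func decodeOffsetsRecord(key, value []byte) error {
	if value == nil {
		return nil // tombstone: the entry was deleted by compaction/expiry
	}
	r := kbin.Reader{Src: key}
	switch version := r.Int16(); version {
	case 0, 1:
		var k kmsg.OffsetCommitKey
		if err := k.ReadFrom(key); err != nil {
			return err
		}
		var v kmsg.OffsetCommitValue
		if err := v.ReadFrom(value); err != nil {
			return err
		}
		fmt.Printf("group %s committed %s[%d] at offset %d\n", k.Group, k.Topic, k.Partition, v.Offset)
	case 2:
		var k kmsg.GroupMetadataKey
		if err := k.ReadFrom(key); err != nil {
			return err
		}
		var v kmsg.GroupMetadataValue
		if err := v.ReadFrom(value); err != nil {
			return err
		}
		fmt.Printf("group %s has %d members\n", k.Group, len(v.Members))
	default:
		return fmt.Errorf("unknown __consumer_offsets key version %d", version)
	}
	return nil
}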
+func NewGroupMetadataValue() GroupMetadataValue { + var v GroupMetadataValue + v.Default() + return v +} + +// TxnMetadataKey is the key for the Kafka internal __transaction_state topic +// if the key starts with an int16 with a value of 0. +type TxnMetadataKey struct { + // Version is the version of this type. + Version int16 + + // TransactionalID is the transactional ID this record is for. + TransactionalID string +} + +func (v *TxnMetadataKey) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.Version + dst = kbin.AppendInt16(dst, v) + } + { + v := v.TransactionalID + dst = kbin.AppendString(dst, v) + } + return dst +} + +func (v *TxnMetadataKey) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *TxnMetadataKey) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *TxnMetadataKey) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + v.Version = b.Int16() + version := v.Version + _ = version + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.TransactionalID = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to TxnMetadataKey. +func (v *TxnMetadataKey) Default() { +} + +// NewTxnMetadataKey returns a default TxnMetadataKey +// This is a shortcut for creating a struct and calling Default yourself. +func NewTxnMetadataKey() TxnMetadataKey { + var v TxnMetadataKey + v.Default() + return v +} + +type TxnMetadataValueTopic struct { + // Topic is a topic involved in this transaction. + Topic string + + // Partitions are partitions in this topic involved in the transaction. + Partitions []int32 +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to TxnMetadataValueTopic. +func (v *TxnMetadataValueTopic) Default() { +} + +// NewTxnMetadataValueTopic returns a default TxnMetadataValueTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewTxnMetadataValueTopic() TxnMetadataValueTopic { + var v TxnMetadataValueTopic + v.Default() + return v +} + +// TxnMetadataValue is the value for the Kafka internal __transaction_state +// topic if the key is of TxnMetadataKey type. +type TxnMetadataValue struct { + // Version is the version of this value. + Version int16 + + // ProducerID is the ID in use by the transactional ID. + ProducerID int64 + + // ProducerEpoch is the epoch associated with the producer ID. + ProducerEpoch int16 + + // TimeoutMillis is the timeout of this transaction in milliseconds. + TimeoutMillis int32 + + // State is the state this transaction is in, + // 0 is Empty, 1 is Ongoing, 2 is PrepareCommit, 3 is PrepareAbort, 4 is + // CompleteCommit, 5 is CompleteAbort, 6 is Dead, and 7 is PrepareEpochFence. + State TransactionState + + // Topics are topics that are involved in this transaction. + Topics []TxnMetadataValueTopic + + // LastUpdateTimestamp is the timestamp in millis of when this transaction + // was last updated. + LastUpdateTimestamp int64 + + // StartTimestamp is the timestamp in millis of when this transaction started. 
+ StartTimestamp int64 +} + +func (v *TxnMetadataValue) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.Version + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ProducerEpoch + dst = kbin.AppendInt16(dst, v) + } + { + v := v.TimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.State + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.Topics + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Topic + dst = kbin.AppendString(dst, v) + } + { + v := v.Partitions + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + } + } + { + v := v.LastUpdateTimestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.StartTimestamp + dst = kbin.AppendInt64(dst, v) + } + return dst +} + +func (v *TxnMetadataValue) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *TxnMetadataValue) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *TxnMetadataValue) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + v.Version = b.Int16() + version := v.Version + _ = version + s := v + { + v := b.Int64() + s.ProducerID = v + } + { + v := b.Int16() + s.ProducerEpoch = v + } + { + v := b.Int32() + s.TimeoutMillis = v + } + { + var t TransactionState + { + v := b.Int8() + t = TransactionState(v) + } + v := t + s.State = v + } + { + v := s.Topics + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]TxnMetadataValueTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + } + v = a + s.Topics = v + } + { + v := b.Int64() + s.LastUpdateTimestamp = v + } + { + v := b.Int64() + s.StartTimestamp = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to TxnMetadataValue. +func (v *TxnMetadataValue) Default() { +} + +// NewTxnMetadataValue returns a default TxnMetadataValue +// This is a shortcut for creating a struct and calling Default yourself. +func NewTxnMetadataValue() TxnMetadataValue { + var v TxnMetadataValue + v.Default() + return v +} + +type StickyMemberMetadataCurrentAssignment struct { + // Topic is a topic the group member is currently assigned. + Topic string + + // Partitions are the partitions within a topic that a group member is + // currently assigned. + Partitions []int32 +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to StickyMemberMetadataCurrentAssignment. +func (v *StickyMemberMetadataCurrentAssignment) Default() { +} + +// NewStickyMemberMetadataCurrentAssignment returns a default StickyMemberMetadataCurrentAssignment +// This is a shortcut for creating a struct and calling Default yourself. 
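Referring back to the __transaction_state types above, the same pattern applies there: the key carries the transactional ID and the value carries the producer/epoch/state snapshot. A short, illustrative sketch (kmsg package assumed):

package main

import (
	"fmt"

	"github.com/twmb/franz-go/pkg/kmsg"
)

// decodeTxnRecord decodes one __transaction_state key/value pair.
func decodeTxnRecord(key, value []byte) error {
	var k kmsg.TxnMetadataKey
	if err := k.ReadFrom(key); err != nil {
		return err
	}
	var v kmsg.TxnMetadataValue
	if err := v.ReadFrom(value); err != nil {
		return err
	}
	fmt.Printf("txn %q: producer %d epoch %d state %v, %d topics involved\n",
		k.TransactionalID, v.ProducerID, v.ProducerEpoch, v.State, len(v.Topics))
	return nil
}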
+func NewStickyMemberMetadataCurrentAssignment() StickyMemberMetadataCurrentAssignment { + var v StickyMemberMetadataCurrentAssignment + v.Default() + return v +} + +// StickyMemberMetadata is is what is encoded in UserData for +// ConsumerMemberMetadata in group join requests with the sticky partitioning +// strategy. +// +// V1 added generation, which fixed a bug with flaky group members joining +// repeatedly. See KIP-341 for more details. +// +// Note that clients should always try decoding as v1 and, if that fails, +// fall back to v0. This is necessary due to there being no version number +// anywhere in this type. +type StickyMemberMetadata struct { + // CurrentAssignment is the assignment that a group member has when + // issuing a join. + CurrentAssignment []StickyMemberMetadataCurrentAssignment + + // Generation is the generation of this join. This is incremented every join. + // + // This field has a default of -1. + Generation int32 // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to StickyMemberMetadata. +func (v *StickyMemberMetadata) Default() { + v.Generation = -1 +} + +// NewStickyMemberMetadata returns a default StickyMemberMetadata +// This is a shortcut for creating a struct and calling Default yourself. +func NewStickyMemberMetadata() StickyMemberMetadata { + var v StickyMemberMetadata + v.Default() + return v +} + +type ConsumerMemberMetadataOwnedPartition struct { + Topic string + + Partitions []int32 +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ConsumerMemberMetadataOwnedPartition. +func (v *ConsumerMemberMetadataOwnedPartition) Default() { +} + +// NewConsumerMemberMetadataOwnedPartition returns a default ConsumerMemberMetadataOwnedPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewConsumerMemberMetadataOwnedPartition() ConsumerMemberMetadataOwnedPartition { + var v ConsumerMemberMetadataOwnedPartition + v.Default() + return v +} + +// ConsumerMemberMetadata is the metadata that is usually sent with a join group +// request with the "consumer" protocol (normal, non-connect consumers). +type ConsumerMemberMetadata struct { + // Version is 0, 1, 2, or 3. + Version int16 + + // Topics is the list of topics in the group that this member is interested + // in consuming. + Topics []string + + // UserData is arbitrary client data for a given client in the group. + // For sticky assignment, this is StickyMemberMetadata. + UserData []byte + + // OwnedPartitions, introduced for KIP-429, are the partitions that this + // member currently owns. + OwnedPartitions []ConsumerMemberMetadataOwnedPartition // v1+ + + // Generation is the generation of the group. + // + // This field has a default of -1. + Generation int32 // v2+ + + // Rack, if non-nil, opts into rack-aware replica assignment. 
+ Rack *string // v3+ +} + +func (v *ConsumerMemberMetadata) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.Version + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Topics + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := v[i] + dst = kbin.AppendString(dst, v) + } + } + { + v := v.UserData + dst = kbin.AppendNullableBytes(dst, v) + } + if version >= 1 { + v := v.OwnedPartitions + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Topic + dst = kbin.AppendString(dst, v) + } + { + v := v.Partitions + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + } + } + if version >= 2 { + v := v.Generation + dst = kbin.AppendInt32(dst, v) + } + if version >= 3 { + v := v.Rack + dst = kbin.AppendNullableString(dst, v) + } + return dst +} + +func (v *ConsumerMemberMetadata) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ConsumerMemberMetadata) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ConsumerMemberMetadata) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + v.Version = b.Int16() + version := v.Version + _ = version + s := v + { + v := s.Topics + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]string, l)...) + } + for i := int32(0); i < l; i++ { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + a[i] = v + } + v = a + s.Topics = v + } + { + v := b.NullableBytes() + s.UserData = v + } + if version >= 1 { + v := s.OwnedPartitions + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ConsumerMemberMetadataOwnedPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + } + v = a + s.OwnedPartitions = v + } + if version >= 2 { + v := b.Int32() + s.Generation = v + } + if version >= 3 { + var v *string + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + s.Rack = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ConsumerMemberMetadata. +func (v *ConsumerMemberMetadata) Default() { + v.Generation = -1 +} + +// NewConsumerMemberMetadata returns a default ConsumerMemberMetadata +// This is a shortcut for creating a struct and calling Default yourself. +func NewConsumerMemberMetadata() ConsumerMemberMetadata { + var v ConsumerMemberMetadata + v.Default() + return v +} + +type ConsumerMemberAssignmentTopic struct { + // Topic is a topic in the assignment. + Topic string + + // Partitions contains partitions in the assignment. + Partitions []int32 +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ConsumerMemberAssignmentTopic. 
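For the ConsumerMemberMetadata type just defined, the bytes typically arrive as the protocol metadata a group member sends when joining (and as the member metadata in group descriptions). A small, illustrative decode sketch, again assuming the kmsg package:

package main

import (
	"fmt"

	"github.com/twmb/franz-go/pkg/kmsg"
)

// describeMember decodes a "consumer" protocol member's join metadata and
// lists its subscription.
func describeMember(metadata []byte) error {
	var m kmsg.ConsumerMemberMetadata
	if err := m.ReadFrom(metadata); err != nil {
		return err
	}
	fmt.Printf("version %d, subscribed to %v\n", m.Version, m.Topics)
	for _, owned := range m.OwnedPartitions { // populated for v1+ metadata only
		fmt.Printf("  currently owns %s %v\n", owned.Topic, owned.Partitions)
	}
	return nil
}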
+func (v *ConsumerMemberAssignmentTopic) Default() { +} + +// NewConsumerMemberAssignmentTopic returns a default ConsumerMemberAssignmentTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewConsumerMemberAssignmentTopic() ConsumerMemberAssignmentTopic { + var v ConsumerMemberAssignmentTopic + v.Default() + return v +} + +// ConsumerMemberAssignment is the assignment data that is usually sent with a +// sync group request with the "consumer" protocol (normal, non-connect +// consumers). +type ConsumerMemberAssignment struct { + // Verson is 0, 1, or 2. + Version int16 + + // Topics contains topics in the assignment. + Topics []ConsumerMemberAssignmentTopic + + // UserData is arbitrary client data for a given client in the group. + UserData []byte +} + +func (v *ConsumerMemberAssignment) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.Version + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Topics + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Topic + dst = kbin.AppendString(dst, v) + } + { + v := v.Partitions + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + } + } + { + v := v.UserData + dst = kbin.AppendNullableBytes(dst, v) + } + return dst +} + +func (v *ConsumerMemberAssignment) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ConsumerMemberAssignment) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ConsumerMemberAssignment) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + v.Version = b.Int16() + version := v.Version + _ = version + s := v + { + v := s.Topics + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ConsumerMemberAssignmentTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + } + v = a + s.Topics = v + } + { + v := b.NullableBytes() + s.UserData = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ConsumerMemberAssignment. +func (v *ConsumerMemberAssignment) Default() { +} + +// NewConsumerMemberAssignment returns a default ConsumerMemberAssignment +// This is a shortcut for creating a struct and calling Default yourself. +func NewConsumerMemberAssignment() ConsumerMemberAssignment { + var v ConsumerMemberAssignment + v.Default() + return v +} + +// ConnectMemberMetadata is the metadata used in a join group request with the +// "connect" protocol. v1 introduced incremental cooperative rebalancing (akin +// to cooperative-sticky) per KIP-415. 
+// +// v0 defined in connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/ConnectProtocol.java +// v1+ defined in connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/IncrementalCooperativeConnectProtocol.java +type ConnectMemberMetadata struct { + Version int16 + + URL string + + ConfigOffset int64 + + CurrentAssignment []byte // v1+ +} + +func (v *ConnectMemberMetadata) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.Version + dst = kbin.AppendInt16(dst, v) + } + { + v := v.URL + dst = kbin.AppendString(dst, v) + } + { + v := v.ConfigOffset + dst = kbin.AppendInt64(dst, v) + } + if version >= 1 { + v := v.CurrentAssignment + dst = kbin.AppendNullableBytes(dst, v) + } + return dst +} + +func (v *ConnectMemberMetadata) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ConnectMemberMetadata) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ConnectMemberMetadata) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + v.Version = b.Int16() + version := v.Version + _ = version + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.URL = v + } + { + v := b.Int64() + s.ConfigOffset = v + } + if version >= 1 { + v := b.NullableBytes() + s.CurrentAssignment = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ConnectMemberMetadata. +func (v *ConnectMemberMetadata) Default() { +} + +// NewConnectMemberMetadata returns a default ConnectMemberMetadata +// This is a shortcut for creating a struct and calling Default yourself. +func NewConnectMemberMetadata() ConnectMemberMetadata { + var v ConnectMemberMetadata + v.Default() + return v +} + +type ConnectMemberAssignmentAssignment struct { + Connector string + + Tasks []int16 +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ConnectMemberAssignmentAssignment. +func (v *ConnectMemberAssignmentAssignment) Default() { +} + +// NewConnectMemberAssignmentAssignment returns a default ConnectMemberAssignmentAssignment +// This is a shortcut for creating a struct and calling Default yourself. +func NewConnectMemberAssignmentAssignment() ConnectMemberAssignmentAssignment { + var v ConnectMemberAssignmentAssignment + v.Default() + return v +} + +type ConnectMemberAssignmentRevoked struct { + Connector string + + Tasks []int16 +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ConnectMemberAssignmentRevoked. +func (v *ConnectMemberAssignmentRevoked) Default() { +} + +// NewConnectMemberAssignmentRevoked returns a default ConnectMemberAssignmentRevoked +// This is a shortcut for creating a struct and calling Default yourself. +func NewConnectMemberAssignmentRevoked() ConnectMemberAssignmentRevoked { + var v ConnectMemberAssignmentRevoked + v.Default() + return v +} + +// ConnectMemberAssignment is the assignment that is used in a sync group +// request with the "connect" protocol. See ConnectMemberMetadata for links to +// the Kafka code where these fields are defined. 
+type ConnectMemberAssignment struct { + Version int16 + + Error int16 + + Leader string + + LeaderURL string + + ConfigOffset int64 + + Assignment []ConnectMemberAssignmentAssignment + + Revoked []ConnectMemberAssignmentRevoked // v1+ + + ScheduledDelay int32 // v1+ +} + +func (v *ConnectMemberAssignment) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.Version + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Error + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Leader + dst = kbin.AppendString(dst, v) + } + { + v := v.LeaderURL + dst = kbin.AppendString(dst, v) + } + { + v := v.ConfigOffset + dst = kbin.AppendInt64(dst, v) + } + { + v := v.Assignment + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Connector + dst = kbin.AppendString(dst, v) + } + { + v := v.Tasks + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := v[i] + dst = kbin.AppendInt16(dst, v) + } + } + } + } + if version >= 1 { + v := v.Revoked + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Connector + dst = kbin.AppendString(dst, v) + } + { + v := v.Tasks + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := v[i] + dst = kbin.AppendInt16(dst, v) + } + } + } + } + if version >= 1 { + v := v.ScheduledDelay + dst = kbin.AppendInt32(dst, v) + } + return dst +} + +func (v *ConnectMemberAssignment) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ConnectMemberAssignment) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ConnectMemberAssignment) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + v.Version = b.Int16() + version := v.Version + _ = version + s := v + { + v := b.Int16() + s.Error = v + } + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Leader = v + } + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.LeaderURL = v + } + { + v := b.Int64() + s.ConfigOffset = v + } + { + v := s.Assignment + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ConnectMemberAssignmentAssignment, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Connector = v + } + { + v := s.Tasks + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int16, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int16() + a[i] = v + } + v = a + s.Tasks = v + } + } + v = a + s.Assignment = v + } + if version >= 1 { + v := s.Revoked + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ConnectMemberAssignmentRevoked, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Connector = v + } + { + v := s.Tasks + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int16, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := b.Int16() + a[i] = v + } + v = a + s.Tasks = v + } + } + v = a + s.Revoked = v + } + if version >= 1 { + v := b.Int32() + s.ScheduledDelay = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ConnectMemberAssignment. +func (v *ConnectMemberAssignment) Default() { +} + +// NewConnectMemberAssignment returns a default ConnectMemberAssignment +// This is a shortcut for creating a struct and calling Default yourself. +func NewConnectMemberAssignment() ConnectMemberAssignment { + var v ConnectMemberAssignment + v.Default() + return v +} + +// DefaultPrincipalData is the encoded principal data. This is used in an +// envelope request from broker to broker. +type DefaultPrincipalData struct { + Version int16 + + // The principal type. + Type string + + // The principal name. + Name string + + // Whether the principal was authenticated by a delegation token on the forwarding broker. + TokenAuthenticated bool + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (v *DefaultPrincipalData) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.Version + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Type + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.TokenAuthenticated + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DefaultPrincipalData) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DefaultPrincipalData) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DefaultPrincipalData) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + v.Version = b.Int16() + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Type = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + { + v := b.Bool() + s.TokenAuthenticated = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} +func (v *DefaultPrincipalData) IsFlexible() bool { return v.Version >= 0 } + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DefaultPrincipalData. +func (v *DefaultPrincipalData) Default() { +} + +// NewDefaultPrincipalData returns a default DefaultPrincipalData +// This is a shortcut for creating a struct and calling Default yourself. +func NewDefaultPrincipalData() DefaultPrincipalData { + var v DefaultPrincipalData + v.Default() + return v +} + +// ControlRecordKey is the key in a control record. 
+type ControlRecordKey struct { + Version int16 + + Type ControlRecordKeyType +} + +func (v *ControlRecordKey) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.Version + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Type + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + return dst +} + +func (v *ControlRecordKey) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ControlRecordKey) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ControlRecordKey) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + v.Version = b.Int16() + version := v.Version + _ = version + s := v + { + var t ControlRecordKeyType + { + v := b.Int8() + t = ControlRecordKeyType(v) + } + v := t + s.Type = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ControlRecordKey. +func (v *ControlRecordKey) Default() { +} + +// NewControlRecordKey returns a default ControlRecordKey +// This is a shortcut for creating a struct and calling Default yourself. +func NewControlRecordKey() ControlRecordKey { + var v ControlRecordKey + v.Default() + return v +} + +// EndTxnMarker is the value for a control record when the key is type 0 or 1. +type EndTxnMarker struct { + Version int16 + + CoordinatorEpoch int32 +} + +func (v *EndTxnMarker) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.Version + dst = kbin.AppendInt16(dst, v) + } + { + v := v.CoordinatorEpoch + dst = kbin.AppendInt32(dst, v) + } + return dst +} + +func (v *EndTxnMarker) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *EndTxnMarker) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *EndTxnMarker) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + v.Version = b.Int16() + version := v.Version + _ = version + s := v + { + v := b.Int32() + s.CoordinatorEpoch = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to EndTxnMarker. +func (v *EndTxnMarker) Default() { +} + +// NewEndTxnMarker returns a default EndTxnMarker +// This is a shortcut for creating a struct and calling Default yourself. +func NewEndTxnMarker() EndTxnMarker { + var v EndTxnMarker + v.Default() + return v +} + +type LeaderChangeMessageVoter struct { + VoterID int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to LeaderChangeMessageVoter. +func (v *LeaderChangeMessageVoter) Default() { +} + +// NewLeaderChangeMessageVoter returns a default LeaderChangeMessageVoter +// This is a shortcut for creating a struct and calling Default yourself. +func NewLeaderChangeMessageVoter() LeaderChangeMessageVoter { + var v LeaderChangeMessageVoter + v.Default() + return v +} + +// LeaderChangeMessage is the value for a control record when the key is type 3. +type LeaderChangeMessage struct { + Version int16 + + // The ID of the newly elected leader. + LeaderID int32 + + // The set of voters in the quorum for this epoch. + Voters []LeaderChangeMessageVoter + + // The voters who voted for the leader at the time of election. 
+ GrantingVoters []LeaderChangeMessageVoter + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (v *LeaderChangeMessage) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.Version + dst = kbin.AppendInt16(dst, v) + } + { + v := v.LeaderID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Voters + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.VoterID + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.GrantingVoters + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.VoterID + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *LeaderChangeMessage) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *LeaderChangeMessage) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *LeaderChangeMessage) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + v.Version = b.Int16() + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.LeaderID = v + } + { + v := s.Voters + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]LeaderChangeMessageVoter, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.VoterID = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Voters = v + } + { + v := s.GrantingVoters + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]LeaderChangeMessageVoter, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.VoterID = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.GrantingVoters = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} +func (v *LeaderChangeMessage) IsFlexible() bool { return v.Version >= 0 } + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to LeaderChangeMessage. +func (v *LeaderChangeMessage) Default() { +} + +// NewLeaderChangeMessage returns a default LeaderChangeMessage +// This is a shortcut for creating a struct and calling Default yourself. +func NewLeaderChangeMessage() LeaderChangeMessage { + var v LeaderChangeMessage + v.Default() + return v +} + +type ProduceRequestTopicPartition struct { + // Partition is a partition to send a record batch to. + Partition int32 + + // Records is a batch of records to write to a topic's partition. + // + // For Kafka pre 0.11.0, the contents of the byte array is a serialized + // message set. 
At or after 0.11.0, the contents of the byte array is a + // serialized RecordBatch. + Records []byte + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v9+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ProduceRequestTopicPartition. +func (v *ProduceRequestTopicPartition) Default() { +} + +// NewProduceRequestTopicPartition returns a default ProduceRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewProduceRequestTopicPartition() ProduceRequestTopicPartition { + var v ProduceRequestTopicPartition + v.Default() + return v +} + +type ProduceRequestTopic struct { + // Topic is a topic to send record batches to. + Topic string + + // Partitions is an array of partitions to send record batches to. + Partitions []ProduceRequestTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v9+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ProduceRequestTopic. +func (v *ProduceRequestTopic) Default() { +} + +// NewProduceRequestTopic returns a default ProduceRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewProduceRequestTopic() ProduceRequestTopic { + var v ProduceRequestTopic + v.Default() + return v +} + +// ProduceRequest issues records to be created to Kafka. +// +// Kafka 0.10.0 (v2) changed Records from MessageSet v0 to MessageSet v1. +// Kafka 0.11.0 (v3) again changed Records to RecordBatch. +// +// Note that the special client ID "__admin_client" will allow you to produce +// records to internal topics. This is generally recommended if you want to +// break your Kafka cluster. +type ProduceRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // TransactionID is the transaction ID to use for this request, allowing for + // exactly once semantics. + TransactionID *string // v3+ + + // Acks specifies the number of acks that the partition leaders must receive + // from in sync replicas before considering a record batch fully written. + // + // Valid values are -1, 0, or 1 corresponding to all, none, or the leader only. + // + // Note that if no acks are requested, Kafka will close the connection + // if any topic or partition errors to trigger a client metadata refresh. + Acks int16 + + // TimeoutMillis is how long Kafka can wait before responding to this request. + // This field has no effect on Kafka's processing of the request; the request + // will continue to be processed if the timeout is reached. If the timeout is + // reached, Kafka will reply with a REQUEST_TIMED_OUT error. + // + // This field has a default of 15000. + TimeoutMillis int32 + + // Topics is an array of topics to send record batches to. + Topics []ProduceRequestTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v9+ +} + +func (*ProduceRequest) Key() int16 { return 0 } +func (*ProduceRequest) MaxVersion() int16 { return 10 } +func (v *ProduceRequest) SetVersion(version int16) { v.Version = version } +func (v *ProduceRequest) GetVersion() int16 { return v.Version } +func (v *ProduceRequest) IsFlexible() bool { return v.Version >= 9 } +func (v *ProduceRequest) Timeout() int32 { return v.TimeoutMillis } +func (v *ProduceRequest) SetTimeout(timeoutMillis int32) { v.TimeoutMillis = timeoutMillis } +func (v *ProduceRequest) ResponseKind() Response { + r := &ProduceResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *ProduceRequest) RequestWith(ctx context.Context, r Requestor) (*ProduceResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*ProduceResponse) + return resp, err +} + +func (v *ProduceRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 9 + _ = isFlexible + if version >= 3 { + v := v.TransactionID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Acks + dst = kbin.AppendInt16(dst, v) + } + { + v := v.TimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Records + if isFlexible { + dst = kbin.AppendCompactNullableBytes(dst, v) + } else { + dst = kbin.AppendNullableBytes(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ProduceRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ProduceRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ProduceRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 9 + _ = isFlexible + s := v + if version >= 3 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.TransactionID = v + } + { + v := b.Int16() + s.Acks = v + } + { + v := b.Int32() + s.TimeoutMillis = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = 
append(a, make([]ProduceRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ProduceRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + var v []byte + if isFlexible { + v = b.CompactNullableBytes() + } else { + v = b.NullableBytes() + } + s.Records = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrProduceRequest returns a pointer to a default ProduceRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrProduceRequest() *ProduceRequest { + var v ProduceRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ProduceRequest. +func (v *ProduceRequest) Default() { + v.TimeoutMillis = 15000 +} + +// NewProduceRequest returns a default ProduceRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewProduceRequest() ProduceRequest { + var v ProduceRequest + v.Default() + return v +} + +type ProduceResponseTopicPartitionErrorRecord struct { + // RelativeOffset is the offset of the record that caused problems. + RelativeOffset int32 + + // ErrorMessage is the error of this record. + ErrorMessage *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v9+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ProduceResponseTopicPartitionErrorRecord. +func (v *ProduceResponseTopicPartitionErrorRecord) Default() { +} + +// NewProduceResponseTopicPartitionErrorRecord returns a default ProduceResponseTopicPartitionErrorRecord +// This is a shortcut for creating a struct and calling Default yourself. +func NewProduceResponseTopicPartitionErrorRecord() ProduceResponseTopicPartitionErrorRecord { + var v ProduceResponseTopicPartitionErrorRecord + v.Default() + return v +} + +type ProduceResponseTopicPartitionCurrentLeader struct { + // The ID of the current leader, or -1 if unknown. + // + // This field has a default of -1. + LeaderID int32 + + // The latest known leader epoch. + // + // This field has a default of -1. + LeaderEpoch int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v9+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ProduceResponseTopicPartitionCurrentLeader. +func (v *ProduceResponseTopicPartitionCurrentLeader) Default() { + v.LeaderID = -1 + v.LeaderEpoch = -1 +} + +// NewProduceResponseTopicPartitionCurrentLeader returns a default ProduceResponseTopicPartitionCurrentLeader +// This is a shortcut for creating a struct and calling Default yourself. 
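To tie the ProduceRequest pieces above together, here is a minimal sketch of driving the generated accessors by hand. It assumes the request header (correlation ID, client ID, and, for flexible versions, header tags) has already been stripped, leaving only the request body, and that the API version is known from the wire header; unlike the record types, ProduceRequest.ReadFrom does not read a version prefix, so SetVersion must be called first.

package main

import (
	"fmt"

	"github.com/twmb/franz-go/pkg/kmsg"
)

// parseProduceBody decodes a Produce request body at the given version and
// reports the topics/partitions it writes to. It returns whether the broker
// will send a response (acks != 0).
func parseProduceBody(version int16, body []byte) (bool, error) {
	req := kmsg.NewPtrProduceRequest()
	req.SetVersion(version) // version selects flexible vs. non-flexible decoding
	if err := req.ReadFrom(body); err != nil {
		return false, err
	}
	for _, t := range req.Topics {
		for _, p := range t.Partitions {
			fmt.Printf("produce to %s[%d]: %d raw record bytes\n",
				t.Topic, p.Partition, len(p.Records))
		}
	}
	return req.Acks != 0, nil
}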
+func NewProduceResponseTopicPartitionCurrentLeader() ProduceResponseTopicPartitionCurrentLeader { + var v ProduceResponseTopicPartitionCurrentLeader + v.Default() + return v +} + +type ProduceResponseTopicPartition struct { + // Partition is the partition this response pertains to. + Partition int32 + + // ErrorCode is any error for a topic/partition in the request. + // There are many error codes for produce requests. + // + // TRANSACTIONAL_ID_AUTHORIZATION_FAILED is returned for all topics and + // partitions if the request had a transactional ID but the client + // is not authorized for transactions. + // + // CLUSTER_AUTHORIZATION_FAILED is returned for all topics and partitions + // if the request was idempotent but the client is not authorized + // for idempotent requests. + // + // TOPIC_AUTHORIZATION_FAILED is returned for all topics the client + // is not authorized to talk to. + // + // INVALID_REQUIRED_ACKS is returned if the request contained an invalid + // number for "acks". + // + // CORRUPT_MESSAGE is returned for many reasons, generally related to + // problems with messages (invalid magic, size mismatch, etc.). + // + // MESSAGE_TOO_LARGE is returned if a record batch is larger than the + // broker's configured max.message.size. + // + // RECORD_LIST_TOO_LARGE is returned if the record batch is larger than + // the broker's segment.bytes. + // + // INVALID_TIMESTAMP is returned if the record batch uses LogAppendTime + // or if the timestamp delta from when the broker receives the message + // is more than the broker's log.message.timestamp.difference.max.ms. + // + // UNSUPPORTED_FOR_MESSAGE_FORMAT is returned if using a Kafka v2 message + // format (i.e. RecordBatch) feature (idempotence) while sending v1 + // messages (i.e. a MessageSet). + // + // KAFKA_STORAGE_ERROR is returned if the log directory for a partition + // is offline. + // + // NOT_ENOUGH_REPLICAS is returned if all acks are required, but there + // are not enough in sync replicas yet. + // + // NOT_ENOUGH_REPLICAS_AFTER_APPEND is returned on old Kafka versions + // (pre 0.11.0.0) when a message was written to disk and then Kafka + // noticed not enough replicas existed to replicate the message. + // + // DUPLICATE_SEQUENCE_NUMBER is returned for Kafka <1.1.0 when a + // sequence number is detected as a duplicate. After, out of order + // is returned. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the topic or partition + // is unknown. + // + // NOT_LEADER_FOR_PARTITION is returned if the broker is not a leader + // for this partition. This means that the client has stale metadata. + // + // INVALID_PRODUCER_EPOCH is returned if the produce request was + // attempted with an old epoch. Either there is a newer producer using + // the same transaction ID, or the transaction ID used has expired. + // + // UNKNOWN_PRODUCER_ID, added in Kafka 1.0.0 (message format v5+) is + // returned if the producer used an ID that Kafka does not know about or + // if the request has a larger sequence number than Kafka expects. The + // LogStartOffset must be checked in this case. If the offset is greater + // than the last acknowledged offset, then no data loss has occurred; the + // client just sent data so long ago that Kafka rotated the partition out + // of existence and no longer knows of this producer ID. In this case, + // reset your sequence numbers to 0. If the log start offset is equal to + // or less than what the client sent prior, then data loss has occurred. + // See KAFKA-5793 for more details. 
NOTE: Unfortunately, even UNKNOWN_PRODUCER_ID
+ // is unsafe to handle, so this error should likely be treated the same
+ // as OUT_OF_ORDER_SEQUENCE_NUMBER. See KIP-360 for more details.
+ //
+ // OUT_OF_ORDER_SEQUENCE_NUMBER is sent if the batch's FirstSequence was
+ // not what it should be (the last FirstSequence, plus the number of
+ // records in the last batch, plus one). After 1.0.0, this generally
+ // means data loss. Before, there could be confusion on if the broker
+ // actually rotated the partition out of existence (this is why
+ // UNKNOWN_PRODUCER_ID was introduced).
+ ErrorCode int16
+
+ // BaseOffset is the offset that the records in the produce request began
+ // at in the partition.
+ BaseOffset int64
+
+ // LogAppendTime is the millisecond that records were appended to the
+ // partition inside Kafka. This is only not -1 if records were written
+ // with the log append time flag (which producers cannot do).
+ //
+ // This field has a default of -1.
+ LogAppendTime int64 // v2+
+
+ // LogStartOffset, introduced in Kafka 1.0.0, can be used to see if an
+ // UNKNOWN_PRODUCER_ID means Kafka rotated records containing the used
+ // producer ID out of existence, or if Kafka lost data.
+ //
+ // This field has a default of -1.
+ LogStartOffset int64 // v5+
+
+ // ErrorRecords are indices of individual records that caused a batch
+ // to error. This was added for KIP-467.
+ ErrorRecords []ProduceResponseTopicPartitionErrorRecord // v8+
+
+ // ErrorMessage is the global error message of what caused this batch
+ // to error.
+ ErrorMessage *string // v8+
+
+ CurrentLeader ProduceResponseTopicPartitionCurrentLeader // tag 0
+
+ // UnknownTags are tags Kafka sent that we do not know the purpose of.
+ UnknownTags Tags // v9+
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to ProduceResponseTopicPartition.
+func (v *ProduceResponseTopicPartition) Default() {
+ v.LogAppendTime = -1
+ v.LogStartOffset = -1
+ {
+ v := &v.CurrentLeader
+ _ = v
+ v.LeaderID = -1
+ v.LeaderEpoch = -1
+ }
+}
+
+// NewProduceResponseTopicPartition returns a default ProduceResponseTopicPartition
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewProduceResponseTopicPartition() ProduceResponseTopicPartition {
+ var v ProduceResponseTopicPartition
+ v.Default()
+ return v
+}
+
+type ProduceResponseTopic struct {
+ // Topic is the topic this response pertains to.
+ Topic string
+
+ // Partitions is an array of responses for the partitions that
+ // batches were sent to.
+ Partitions []ProduceResponseTopicPartition
+
+ // UnknownTags are tags Kafka sent that we do not know the purpose of.
+ UnknownTags Tags // v9+
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to ProduceResponseTopic.
+func (v *ProduceResponseTopic) Default() {
+}
+
+// NewProduceResponseTopic returns a default ProduceResponseTopic
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewProduceResponseTopic() ProduceResponseTopic {
+ var v ProduceResponseTopic
+ v.Default()
+ return v
+}
+
+type ProduceResponseBroker struct {
+ // NodeID is the node ID of a Kafka broker.
+ NodeID int32
+
+ // Host is the hostname of a Kafka broker.
+ Host string
+
+ // Port is the port of a Kafka broker.
+ Port int32
+
+ // Rack is the rack this Kafka broker is in.
+ Rack *string
+
+ // UnknownTags are tags Kafka sent that we do not know the purpose of.
+ UnknownTags Tags // v9+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ProduceResponseBroker. +func (v *ProduceResponseBroker) Default() { +} + +// NewProduceResponseBroker returns a default ProduceResponseBroker +// This is a shortcut for creating a struct and calling Default yourself. +func NewProduceResponseBroker() ProduceResponseBroker { + var v ProduceResponseBroker + v.Default() + return v +} + +// ProduceResponse is returned from a ProduceRequest. +type ProduceResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Topics is an array of responses for the topic's that batches were sent + // to. + Topics []ProduceResponseTopic + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 6. + ThrottleMillis int32 // v1+ + + // Brokers is present if any partition responses contain the error + // NOT_LEADER_OR_FOLLOWER. + Brokers []ProduceResponseBroker // tag 0 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v9+ +} + +func (*ProduceResponse) Key() int16 { return 0 } +func (*ProduceResponse) MaxVersion() int16 { return 10 } +func (v *ProduceResponse) SetVersion(version int16) { v.Version = version } +func (v *ProduceResponse) GetVersion() int16 { return v.Version } +func (v *ProduceResponse) IsFlexible() bool { return v.Version >= 9 } +func (v *ProduceResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 6 } +func (v *ProduceResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *ProduceResponse) RequestKind() Request { return &ProduceRequest{Version: v.Version} } + +func (v *ProduceResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 9 + _ = isFlexible + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.BaseOffset + dst = kbin.AppendInt64(dst, v) + } + if version >= 2 { + v := v.LogAppendTime + dst = kbin.AppendInt64(dst, v) + } + if version >= 5 { + v := v.LogStartOffset + dst = kbin.AppendInt64(dst, v) + } + if version >= 8 { + v := v.ErrorRecords + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.RelativeOffset + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 8 { + v := v.ErrorMessage + if isFlexible { + dst = 
kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + var toEncode []uint32 + if !reflect.DeepEqual(v.CurrentLeader, (func() ProduceResponseTopicPartitionCurrentLeader { + var v ProduceResponseTopicPartitionCurrentLeader + v.Default() + return v + })()) { + toEncode = append(toEncode, 0) + } + dst = kbin.AppendUvarint(dst, uint32(len(toEncode)+v.UnknownTags.Len())) + for _, tag := range toEncode { + switch tag { + case 0: + { + v := v.CurrentLeader + dst = kbin.AppendUvarint(dst, 0) + sized := false + lenAt := len(dst) + fCurrentLeader: + { + v := v.LeaderID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + if !sized { + dst = kbin.AppendUvarint(dst[:lenAt], uint32(len(dst[lenAt:]))) + sized = true + goto fCurrentLeader + } + } + } + } + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 1 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + var toEncode []uint32 + if len(v.Brokers) > 0 { + toEncode = append(toEncode, 0) + } + dst = kbin.AppendUvarint(dst, uint32(len(toEncode)+v.UnknownTags.Len())) + for _, tag := range toEncode { + switch tag { + case 0: + { + v := v.Brokers + dst = kbin.AppendUvarint(dst, 0) + sized := false + lenAt := len(dst) + fBrokers: + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.NodeID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Host + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Port + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Rack + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + if !sized { + dst = kbin.AppendUvarint(dst[:lenAt], uint32(len(dst[lenAt:]))) + sized = true + goto fBrokers + } + } + } + } + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ProduceResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ProduceResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ProduceResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 9 + _ = isFlexible + s := v + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ProduceResponseTopic, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ProduceResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int64() + s.BaseOffset = v + } + if version >= 2 { + v := b.Int64() + s.LogAppendTime = v + } + if version >= 5 { + v := b.Int64() + s.LogStartOffset = v + } + if version >= 8 { + v := s.ErrorRecords + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ProduceResponseTopicPartitionErrorRecord, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.RelativeOffset = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.ErrorRecords = v + } + if version >= 8 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + if isFlexible { + for i := b.Uvarint(); i > 0; i-- { + switch key := b.Uvarint(); key { + default: + s.UnknownTags.Set(key, b.Span(int(b.Uvarint()))) + case 0: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + v := &s.CurrentLeader + v.Default() + s := v + { + v := b.Int32() + s.LeaderID = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + if err := b.Complete(); err != nil { + return err + } + } + } + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if version >= 1 { + v := b.Int32() + s.ThrottleMillis = v + } + if isFlexible { + for i := b.Uvarint(); i > 0; i-- { + switch key := b.Uvarint(); key { + default: + s.UnknownTags.Set(key, b.Span(int(b.Uvarint()))) + case 0: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + v := s.Brokers + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ProduceResponseBroker, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.NodeID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Host = v + } + { + v := b.Int32() + s.Port = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Rack = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Brokers = v + if err := b.Complete(); err != nil { + return err + } + } + } + } + return b.Complete() +} + +// NewPtrProduceResponse returns a pointer to a default ProduceResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrProduceResponse() *ProduceResponse { + var v ProduceResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ProduceResponse. +func (v *ProduceResponse) Default() { +} + +// NewProduceResponse returns a default ProduceResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewProduceResponse() ProduceResponse { + var v ProduceResponse + v.Default() + return v +} + +type FetchRequestReplicaState struct { + // The replica ID of the follower, or -1 if this request is from a consumer. + // + // This field has a default of -1. + ID int32 + + // The epoch of this follower, or -1 if not available. + // + // This field has a default of -1. + Epoch int64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v12+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchRequestReplicaState. +func (v *FetchRequestReplicaState) Default() { + v.ID = -1 + v.Epoch = -1 +} + +// NewFetchRequestReplicaState returns a default FetchRequestReplicaState +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchRequestReplicaState() FetchRequestReplicaState { + var v FetchRequestReplicaState + v.Default() + return v +} + +type FetchRequestTopicPartition struct { + // Partition is a partition in a topic to try to fetch records for. + Partition int32 + + // CurrentLeaderEpoch, proposed in KIP-320 and introduced in Kafka 2.1.0, + // allows brokers to check if the client is fenced (has an out of date + // leader) or is using an unknown leader. + // + // The initial leader epoch can be determined from a MetadataResponse. + // To skip log truncation checking, use -1. + // + // This field has a default of -1. + CurrentLeaderEpoch int32 // v9+ + + // FetchOffset is the offset to begin the fetch from. Kafka will + // return records at and after this offset. + FetchOffset int64 + + // The epoch of the last fetched record, or -1 if there is none. + // + // This field has a default of -1. + LastFetchedEpoch int32 // v12+ + + // LogStartOffset is a broker-follower only field added for KIP-107. + // This is the start offset of the partition in a follower. + // + // This field has a default of -1. + LogStartOffset int64 // v5+ + + // PartitionMaxBytes is the maximum bytes to return for this partition. 
+ // This can be used to limit how many bytes an individual partition in + // a request is allotted so that it does not dominate all of MaxBytes. + PartitionMaxBytes int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v12+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchRequestTopicPartition. +func (v *FetchRequestTopicPartition) Default() { + v.CurrentLeaderEpoch = -1 + v.LastFetchedEpoch = -1 + v.LogStartOffset = -1 +} + +// NewFetchRequestTopicPartition returns a default FetchRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchRequestTopicPartition() FetchRequestTopicPartition { + var v FetchRequestTopicPartition + v.Default() + return v +} + +type FetchRequestTopic struct { + // Topic is a topic to try to fetch records for. + Topic string // v0-v12 + + // TopicID is the uuid of the topic to fetch records for. + TopicID [16]byte // v13+ + + // Partitions contains partitions in a topic to try to fetch records for. + Partitions []FetchRequestTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v12+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchRequestTopic. +func (v *FetchRequestTopic) Default() { +} + +// NewFetchRequestTopic returns a default FetchRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchRequestTopic() FetchRequestTopic { + var v FetchRequestTopic + v.Default() + return v +} + +type FetchRequestForgottenTopic struct { + // Topic is a topic to remove from being tracked (with the partitions below). + Topic string // v7-v12 + + // TopicID is the uuid of a topic to remove from being tracked (with the + // partitions below). + TopicID [16]byte // v13+ + + // Partitions are partitions to remove from tracking for a topic. + Partitions []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v12+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchRequestForgottenTopic. +func (v *FetchRequestForgottenTopic) Default() { +} + +// NewFetchRequestForgottenTopic returns a default FetchRequestForgottenTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchRequestForgottenTopic() FetchRequestForgottenTopic { + var v FetchRequestForgottenTopic + v.Default() + return v +} + +// FetchRequest is a long-poll request of records from Kafka. +// +// Kafka 0.11.0.0 released v4 and changed the returned RecordBatches to contain +// the RecordBatch type. Prior, Kafka used the MessageSet type (and, for v0 and +// v1, Kafka used a different type). +// +// Note that starting in v3, Kafka began processing partitions in order, +// meaning the order of partitions in the fetch request is important due to +// potential size constraints. +// +// Starting in v13, topics must use UUIDs rather than their string name +// identifiers. +// +// Version 15 adds the ReplicaState which includes new field ReplicaEpoch and +// the ReplicaID, and deprecates the old ReplicaID (KIP-903). +type FetchRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The cluster ID, if known. 
This is used to validate metadata fetches
+ // prior to broker registration.
+ //
+ // This field has a default of null.
+ ClusterID *string // tag 0
+
+ // ReplicaID is the broker ID performing the fetch request. Standard
+ // clients should use -1. To be a "debug" replica, use -2. The debug
+ // replica can be used to fetch messages from non-leaders.
+ //
+ // This field has a default of -1.
+ ReplicaID int32 // v0-v14
+
+ // ReplicaState is a broker-only tag for v15+, see KIP-903 for more details.
+ ReplicaState FetchRequestReplicaState // tag 1
+
+ // MaxWaitMillis is how long to wait for MinBytes to be hit before a broker
+ // responds to a fetch request.
+ MaxWaitMillis int32
+
+ // MinBytes is the minimum amount of bytes to attempt to read before a broker
+ // responds to a fetch request.
+ MinBytes int32
+
+ // MaxBytes is the maximum amount of bytes to read in a fetch request. The
+ // response can exceed MaxBytes if the first record in the first non-empty
+ // partition is larger than MaxBytes.
+ //
+ // This field has a default of 0x7fffffff.
+ MaxBytes int32 // v3+
+
+ // IsolationLevel changes which messages are fetched. Follower replica IDs
+ // (non-negative, non-standard-client) fetch from the end.
+ //
+ // Standard clients fetch from the high watermark, which corresponds to
+ // IsolationLevel 0, READ_UNCOMMITTED.
+ //
+ // To only read committed records, use IsolationLevel 1, corresponding to
+ // READ_COMMITTED.
+ IsolationLevel int8 // v4+
+
+ // SessionID is used to potentially reduce the amount of back and forth
+ // data between a client and a broker. If opting in to sessions, the first
+ // ID used should be 0, and thereafter (until session resets) the ID should
+ // be the ID returned in the fetch response.
+ //
+ // Read KIP-227 for more details. Use -1 if you want to disable sessions.
+ SessionID int32 // v7+
+
+ // SessionEpoch is the session epoch for this request if using sessions.
+ //
+ // Read KIP-227 for more details. Use -1 if you are not using sessions.
+ //
+ // This field has a default of -1.
+ SessionEpoch int32 // v7+
+
+ // Topics contains topics to try to fetch records for.
+ Topics []FetchRequestTopic
+
+ // ForgottenTopics contains topics and partitions that a fetch session
+ // wants to remove from its session.
+ //
+ // See KIP-227 for more details.
+ ForgottenTopics []FetchRequestForgottenTopic // v7+
+
+ // Rack of the consumer making this request (see KIP-392; introduced in
+ // Kafka 2.2.0).
+ Rack string // v11+
+
+ // UnknownTags are tags Kafka sent that we do not know the purpose of.
+ UnknownTags Tags // v12+
+}
+
+func (*FetchRequest) Key() int16 { return 1 }
+func (*FetchRequest) MaxVersion() int16 { return 16 }
+func (v *FetchRequest) SetVersion(version int16) { v.Version = version }
+func (v *FetchRequest) GetVersion() int16 { return v.Version }
+func (v *FetchRequest) IsFlexible() bool { return v.Version >= 12 }
+func (v *FetchRequest) ResponseKind() Response {
+ r := &FetchResponse{Version: v.Version}
+ r.Default()
+ return r
+}
+
+// RequestWith issues the request v on r and returns the response or an error.
+// For sharded requests, the response may be merged and still return an error.
+// It is better to rely on client.RequestSharded than to rely on proper merging behavior.
+func (v *FetchRequest) RequestWith(ctx context.Context, r Requestor) (*FetchResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*FetchResponse) + return resp, err +} + +func (v *FetchRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 12 + _ = isFlexible + if version >= 0 && version <= 14 { + v := v.ReplicaID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.MaxWaitMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.MinBytes + dst = kbin.AppendInt32(dst, v) + } + if version >= 3 { + v := v.MaxBytes + dst = kbin.AppendInt32(dst, v) + } + if version >= 4 { + v := v.IsolationLevel + dst = kbin.AppendInt8(dst, v) + } + if version >= 7 { + v := v.SessionID + dst = kbin.AppendInt32(dst, v) + } + if version >= 7 { + v := v.SessionEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + if version >= 0 && version <= 12 { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 13 { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + if version >= 9 { + v := v.CurrentLeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.FetchOffset + dst = kbin.AppendInt64(dst, v) + } + if version >= 12 { + v := v.LastFetchedEpoch + dst = kbin.AppendInt32(dst, v) + } + if version >= 5 { + v := v.LogStartOffset + dst = kbin.AppendInt64(dst, v) + } + { + v := v.PartitionMaxBytes + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 7 { + v := v.ForgottenTopics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + if version >= 7 && version <= 12 { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 13 { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 11 { + v := v.Rack + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if isFlexible { + var toEncode []uint32 + if v.ClusterID != nil { + toEncode = append(toEncode, 0) + } + if !reflect.DeepEqual(v.ReplicaState, (func() FetchRequestReplicaState { var v FetchRequestReplicaState; v.Default(); return v })()) { + toEncode = append(toEncode, 1) + } + dst = kbin.AppendUvarint(dst, uint32(len(toEncode)+v.UnknownTags.Len())) + for _, tag := range toEncode { + switch tag { + case 0: + { + v := v.ClusterID + dst = kbin.AppendUvarint(dst, 
0) + sized := false + lenAt := len(dst) + fClusterID: + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + if !sized { + dst = kbin.AppendUvarint(dst[:lenAt], uint32(len(dst[lenAt:]))) + sized = true + goto fClusterID + } + } + case 1: + { + v := v.ReplicaState + dst = kbin.AppendUvarint(dst, 1) + sized := false + lenAt := len(dst) + fReplicaState: + { + v := v.ID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Epoch + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + if !sized { + dst = kbin.AppendUvarint(dst[:lenAt], uint32(len(dst[lenAt:]))) + sized = true + goto fReplicaState + } + } + } + } + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *FetchRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *FetchRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *FetchRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 12 + _ = isFlexible + s := v + if version >= 0 && version <= 14 { + v := b.Int32() + s.ReplicaID = v + } + { + v := b.Int32() + s.MaxWaitMillis = v + } + { + v := b.Int32() + s.MinBytes = v + } + if version >= 3 { + v := b.Int32() + s.MaxBytes = v + } + if version >= 4 { + v := b.Int8() + s.IsolationLevel = v + } + if version >= 7 { + v := b.Int32() + s.SessionID = v + } + if version >= 7 { + v := b.Int32() + s.SessionEpoch = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]FetchRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + if version >= 0 && version <= 12 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + if version >= 13 { + v := b.Uuid() + s.TopicID = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]FetchRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + if version >= 9 { + v := b.Int32() + s.CurrentLeaderEpoch = v + } + { + v := b.Int64() + s.FetchOffset = v + } + if version >= 12 { + v := b.Int32() + s.LastFetchedEpoch = v + } + if version >= 5 { + v := b.Int64() + s.LogStartOffset = v + } + { + v := b.Int32() + s.PartitionMaxBytes = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if version >= 7 { + v := s.ForgottenTopics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]FetchRequestForgottenTopic, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + if version >= 7 && version <= 12 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + if version >= 13 { + v := b.Uuid() + s.TopicID = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.ForgottenTopics = v + } + if version >= 11 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Rack = v + } + if isFlexible { + for i := b.Uvarint(); i > 0; i-- { + switch key := b.Uvarint(); key { + default: + s.UnknownTags.Set(key, b.Span(int(b.Uvarint()))) + case 0: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ClusterID = v + if err := b.Complete(); err != nil { + return err + } + case 1: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + v := &s.ReplicaState + v.Default() + s := v + { + v := b.Int32() + s.ID = v + } + { + v := b.Int64() + s.Epoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + if err := b.Complete(); err != nil { + return err + } + } + } + } + return b.Complete() +} + +// NewPtrFetchRequest returns a pointer to a default FetchRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrFetchRequest() *FetchRequest { + var v FetchRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchRequest. +func (v *FetchRequest) Default() { + v.ClusterID = nil + v.ReplicaID = -1 + { + v := &v.ReplicaState + _ = v + v.ID = -1 + v.Epoch = -1 + } + v.MaxBytes = 2147483647 + v.SessionEpoch = -1 +} + +// NewFetchRequest returns a default FetchRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchRequest() FetchRequest { + var v FetchRequest + v.Default() + return v +} + +type FetchResponseTopicPartitionDivergingEpoch struct { + // This field has a default of -1. + Epoch int32 + + // This field has a default of -1. + EndOffset int64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v12+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchResponseTopicPartitionDivergingEpoch. +func (v *FetchResponseTopicPartitionDivergingEpoch) Default() { + v.Epoch = -1 + v.EndOffset = -1 +} + +// NewFetchResponseTopicPartitionDivergingEpoch returns a default FetchResponseTopicPartitionDivergingEpoch +// This is a shortcut for creating a struct and calling Default yourself. 
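
To illustrate the defaults set by FetchRequest.Default above (ReplicaID -1, MaxBytes 0x7fffffff, SessionEpoch -1), here is a minimal sketch of building and encoding a by-name fetch for a single partition; all names and numbers are illustrative.

// Minimal sketch (illustrative values): a single-partition fetch built on
// top of the generated defaults shown above.
req := kmsg.NewPtrFetchRequest() // ReplicaID=-1, MaxBytes=2147483647, SessionEpoch=-1
req.SetVersion(11)               // pre-v13, so topics are addressed by name
req.MaxWaitMillis = 500          // let the broker wait up to 500ms for MinBytes
req.MinBytes = 1

var part kmsg.FetchRequestTopicPartition
part.Default() // CurrentLeaderEpoch and LogStartOffset default to -1
part.Partition = 0
part.FetchOffset = 42
part.PartitionMaxBytes = 1 << 20

var topic kmsg.FetchRequestTopic
topic.Default()
topic.Topic = "example-topic"
topic.Partitions = append(topic.Partitions, part)
req.Topics = append(req.Topics, topic)

wire := req.AppendTo(nil) // request body only, without the Kafka header
_ = wire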
+func NewFetchResponseTopicPartitionDivergingEpoch() FetchResponseTopicPartitionDivergingEpoch { + var v FetchResponseTopicPartitionDivergingEpoch + v.Default() + return v +} + +type FetchResponseTopicPartitionCurrentLeader struct { + // The ID of the current leader, or -1 if unknown. + // + // This field has a default of -1. + LeaderID int32 + + // The latest known leader epoch. + // + // This field has a default of -1. + LeaderEpoch int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v12+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchResponseTopicPartitionCurrentLeader. +func (v *FetchResponseTopicPartitionCurrentLeader) Default() { + v.LeaderID = -1 + v.LeaderEpoch = -1 +} + +// NewFetchResponseTopicPartitionCurrentLeader returns a default FetchResponseTopicPartitionCurrentLeader +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchResponseTopicPartitionCurrentLeader() FetchResponseTopicPartitionCurrentLeader { + var v FetchResponseTopicPartitionCurrentLeader + v.Default() + return v +} + +type FetchResponseTopicPartitionSnapshotID struct { + // This field has a default of -1. + EndOffset int64 + + // This field has a default of -1. + Epoch int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v12+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchResponseTopicPartitionSnapshotID. +func (v *FetchResponseTopicPartitionSnapshotID) Default() { + v.EndOffset = -1 + v.Epoch = -1 +} + +// NewFetchResponseTopicPartitionSnapshotID returns a default FetchResponseTopicPartitionSnapshotID +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchResponseTopicPartitionSnapshotID() FetchResponseTopicPartitionSnapshotID { + var v FetchResponseTopicPartitionSnapshotID + v.Default() + return v +} + +type FetchResponseTopicPartitionAbortedTransaction struct { + // ProducerID is the producer ID that caused this aborted transaction. + ProducerID int64 + + // FirstOffset is the offset where this aborted transaction began. + FirstOffset int64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v12+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchResponseTopicPartitionAbortedTransaction. +func (v *FetchResponseTopicPartitionAbortedTransaction) Default() { +} + +// NewFetchResponseTopicPartitionAbortedTransaction returns a default FetchResponseTopicPartitionAbortedTransaction +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchResponseTopicPartitionAbortedTransaction() FetchResponseTopicPartitionAbortedTransaction { + var v FetchResponseTopicPartitionAbortedTransaction + v.Default() + return v +} + +type FetchResponseTopicPartition struct { + // Partition is a partition in a topic that records may have been + // received for. + Partition int32 + + // ErrorCode is an error returned for an individual partition in a + // fetch request. + // + // TOPIC_AUTHORIZATION_FAILED is returned if the client is not + // authorized to read the partition. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the topic or partition + // does not exist on this broker. 
+ //
+ // UNSUPPORTED_COMPRESSION_TYPE is returned if the request version was
+ // under 10 and the batch is compressed with zstd.
+ //
+ // UNSUPPORTED_VERSION is returned if the broker has records newer than
+ // the client can support (magic value) and the broker has disabled
+ // message downconversion.
+ //
+ // NOT_LEADER_FOR_PARTITION is returned if requesting data for this
+ // partition as a follower (non-negative ReplicaID) and the broker
+ // is not the leader for this partition.
+ //
+ // REPLICA_NOT_AVAILABLE is returned if the partition exists but
+ // the requested broker is not the leader for it.
+ //
+ // KAFKA_STORAGE_ERROR is returned if the requested partition is
+ // offline.
+ //
+ // UNKNOWN_LEADER_EPOCH is returned if the request used a larger leader
+ // epoch than the broker knows of.
+ //
+ // FENCED_LEADER_EPOCH is returned if the request used a smaller leader
+ // epoch than the broker is at (see KIP-320).
+ //
+ // OFFSET_OUT_OF_RANGE is returned if requesting an offset past the
+ // current end offset or before the beginning offset.
+ //
+ // UNKNOWN_TOPIC_ID is returned if using uuids and the uuid is unknown
+ // (v13+ / Kafka 3.1+).
+ //
+ // OFFSET_MOVED_TO_TIERED_STORAGE is returned if a follower is trying to
+ // fetch from an offset that is now in tiered storage.
+ ErrorCode int16
+
+ // HighWatermark is the current high watermark for this partition,
+ // that is, the current offset that is on all in sync replicas.
+ HighWatermark int64
+
+ // LastStableOffset is the offset at which all prior offsets have
+ // been "decided". Non-transactional records are always decided
+ // immediately, but transactional records are only decided once
+ // they are committed or aborted.
+ //
+ // The LastStableOffset will always be at or under the HighWatermark.
+ //
+ // This field has a default of -1.
+ LastStableOffset int64 // v4+
+
+ // LogStartOffset is the beginning offset for this partition.
+ // This field was added for KIP-107.
+ //
+ // This field has a default of -1.
+ LogStartOffset int64 // v5+
+
+ // In case divergence is detected based on the LastFetchedEpoch and
+ // FetchOffset in the request, this field indicates the largest epoch and
+ // its end offset such that subsequent records are known to diverge.
+ DivergingEpoch FetchResponseTopicPartitionDivergingEpoch // tag 0
+
+ // CurrentLeader is the currently known leader ID and epoch for this
+ // partition.
+ CurrentLeader FetchResponseTopicPartitionCurrentLeader // tag 1
+
+ // In the case of fetching an offset less than the LogStartOffset, this
+ // is the end offset and epoch that should be used in the FetchSnapshot
+ // request.
+ SnapshotID FetchResponseTopicPartitionSnapshotID // tag 2
+
+ // AbortedTransactions is an array of aborted transactions within the
+ // returned offset range. This is only returned if the requested
+ // isolation level was READ_COMMITTED.
+ AbortedTransactions []FetchResponseTopicPartitionAbortedTransaction // v4+
+
+ // PreferredReadReplica is the preferred replica for the consumer
+ // to use on its next fetch request. See KIP-392.
+ //
+ // This field has a default of -1.
+ PreferredReadReplica int32 // v11+
+
+ // RecordBatches is an array of record batches for a topic partition.
+ //
+ // This is encoded as a raw byte array, with the standard int32 size
+ // prefix. One important catch to note is that the final element of the
+ // array may be **partial**. This is an optimization in Kafka that
+ // clients must deal with by discarding a partial trailing batch.
+ // + // Starting v2, this transitioned to the MessageSet v1 format (and this + // would contain many MessageV1 structs). + // + // Starting v4, this transitioned to the RecordBatch format (thus this + // contains many RecordBatch structs). + RecordBatches []byte + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v12+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchResponseTopicPartition. +func (v *FetchResponseTopicPartition) Default() { + v.LastStableOffset = -1 + v.LogStartOffset = -1 + { + v := &v.DivergingEpoch + _ = v + v.Epoch = -1 + v.EndOffset = -1 + } + { + v := &v.CurrentLeader + _ = v + v.LeaderID = -1 + v.LeaderEpoch = -1 + } + { + v := &v.SnapshotID + _ = v + v.EndOffset = -1 + v.Epoch = -1 + } + v.PreferredReadReplica = -1 +} + +// NewFetchResponseTopicPartition returns a default FetchResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchResponseTopicPartition() FetchResponseTopicPartition { + var v FetchResponseTopicPartition + v.Default() + return v +} + +type FetchResponseTopic struct { + // Topic is a topic that records may have been received for. + Topic string // v0-v12 + + // TopicID is the uuid of a topic that records may have been received for. + TopicID [16]byte // v13+ + + // Partitions contains partitions in a topic that records may have + // been received for. + Partitions []FetchResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v12+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchResponseTopic. +func (v *FetchResponseTopic) Default() { +} + +// NewFetchResponseTopic returns a default FetchResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchResponseTopic() FetchResponseTopic { + var v FetchResponseTopic + v.Default() + return v +} + +type FetchResponseBroker struct { + // NodeID is the node ID of a Kafka broker. + NodeID int32 + + // Host is the hostname of a Kafka broker. + Host string + + // Port is the port of a Kafka broker. + Port int32 + + // Rack is the rack this Kafka broker is in. + Rack *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v12+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchResponseBroker. +func (v *FetchResponseBroker) Default() { +} + +// NewFetchResponseBroker returns a default FetchResponseBroker +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchResponseBroker() FetchResponseBroker { + var v FetchResponseBroker + v.Default() + return v +} + +// FetchResponse is returned from a FetchRequest. +type FetchResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 8. + ThrottleMillis int32 // v1+ + + // ErrorCode is a full-response error code for a fetch request. This was + // added in support of KIP-227. This error is only non-zero if using fetch + // sessions. 
+ // + // FETCH_SESSION_ID_NOT_FOUND is returned if the request used a + // session ID that the broker does not know of. + // + // INVALID_FETCH_SESSION_EPOCH is returned if the request used an + // invalid session epoch. + ErrorCode int16 // v7+ + + // SessionID is the id for this session if using sessions. + // + // See KIP-227 for more details. + SessionID int32 // v7+ + + // Topics contains an array of topic partitions and the records received + // for them. + Topics []FetchResponseTopic + + // Brokers is present if any partition responses contain the error + // NOT_LEADER_OR_FOLLOWER. + Brokers []FetchResponseBroker // tag 0 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v12+ +} + +func (*FetchResponse) Key() int16 { return 1 } +func (*FetchResponse) MaxVersion() int16 { return 16 } +func (v *FetchResponse) SetVersion(version int16) { v.Version = version } +func (v *FetchResponse) GetVersion() int16 { return v.Version } +func (v *FetchResponse) IsFlexible() bool { return v.Version >= 12 } +func (v *FetchResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 8 } +func (v *FetchResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *FetchResponse) RequestKind() Request { return &FetchRequest{Version: v.Version} } + +func (v *FetchResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 12 + _ = isFlexible + if version >= 1 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + if version >= 7 { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if version >= 7 { + v := v.SessionID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + if version >= 0 && version <= 12 { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 13 { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.HighWatermark + dst = kbin.AppendInt64(dst, v) + } + if version >= 4 { + v := v.LastStableOffset + dst = kbin.AppendInt64(dst, v) + } + if version >= 5 { + v := v.LogStartOffset + dst = kbin.AppendInt64(dst, v) + } + if version >= 4 { + v := v.AbortedTransactions + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + for i := range v { + v := &v[i] + { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + { + v := v.FirstOffset + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 11 { + v := v.PreferredReadReplica + dst = kbin.AppendInt32(dst, v) + } + { + v := v.RecordBatches + if isFlexible { + dst = kbin.AppendCompactNullableBytes(dst, v) + } else { + dst = kbin.AppendNullableBytes(dst, v) + } + } + if isFlexible { + var toEncode []uint32 + if !reflect.DeepEqual(v.DivergingEpoch, (func() FetchResponseTopicPartitionDivergingEpoch { + var v 
FetchResponseTopicPartitionDivergingEpoch + v.Default() + return v + })()) { + toEncode = append(toEncode, 0) + } + if !reflect.DeepEqual(v.CurrentLeader, (func() FetchResponseTopicPartitionCurrentLeader { + var v FetchResponseTopicPartitionCurrentLeader + v.Default() + return v + })()) { + toEncode = append(toEncode, 1) + } + if !reflect.DeepEqual(v.SnapshotID, (func() FetchResponseTopicPartitionSnapshotID { + var v FetchResponseTopicPartitionSnapshotID + v.Default() + return v + })()) { + toEncode = append(toEncode, 2) + } + dst = kbin.AppendUvarint(dst, uint32(len(toEncode)+v.UnknownTags.Len())) + for _, tag := range toEncode { + switch tag { + case 0: + { + v := v.DivergingEpoch + dst = kbin.AppendUvarint(dst, 0) + sized := false + lenAt := len(dst) + fDivergingEpoch: + { + v := v.Epoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.EndOffset + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + if !sized { + dst = kbin.AppendUvarint(dst[:lenAt], uint32(len(dst[lenAt:]))) + sized = true + goto fDivergingEpoch + } + } + case 1: + { + v := v.CurrentLeader + dst = kbin.AppendUvarint(dst, 1) + sized := false + lenAt := len(dst) + fCurrentLeader: + { + v := v.LeaderID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + if !sized { + dst = kbin.AppendUvarint(dst[:lenAt], uint32(len(dst[lenAt:]))) + sized = true + goto fCurrentLeader + } + } + case 2: + { + v := v.SnapshotID + dst = kbin.AppendUvarint(dst, 2) + sized := false + lenAt := len(dst) + fSnapshotID: + { + v := v.EndOffset + dst = kbin.AppendInt64(dst, v) + } + { + v := v.Epoch + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + if !sized { + dst = kbin.AppendUvarint(dst[:lenAt], uint32(len(dst[lenAt:]))) + sized = true + goto fSnapshotID + } + } + } + } + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + var toEncode []uint32 + if len(v.Brokers) > 0 { + toEncode = append(toEncode, 0) + } + dst = kbin.AppendUvarint(dst, uint32(len(toEncode)+v.UnknownTags.Len())) + for _, tag := range toEncode { + switch tag { + case 0: + { + v := v.Brokers + dst = kbin.AppendUvarint(dst, 0) + sized := false + lenAt := len(dst) + fBrokers: + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.NodeID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Host + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Port + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Rack + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + if !sized { + dst = kbin.AppendUvarint(dst[:lenAt], uint32(len(dst[lenAt:]))) + sized = true + goto fBrokers + } + } + } + } + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *FetchResponse) ReadFrom(src 
[]byte) error { + return v.readFrom(src, false) +} + +func (v *FetchResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *FetchResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 12 + _ = isFlexible + s := v + if version >= 1 { + v := b.Int32() + s.ThrottleMillis = v + } + if version >= 7 { + v := b.Int16() + s.ErrorCode = v + } + if version >= 7 { + v := b.Int32() + s.SessionID = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]FetchResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + if version >= 0 && version <= 12 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + if version >= 13 { + v := b.Uuid() + s.TopicID = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]FetchResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int64() + s.HighWatermark = v + } + if version >= 4 { + v := b.Int64() + s.LastStableOffset = v + } + if version >= 5 { + v := b.Int64() + s.LogStartOffset = v + } + if version >= 4 { + v := s.AbortedTransactions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 0 || l == 0 { + a = []FetchResponseTopicPartitionAbortedTransaction{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]FetchResponseTopicPartitionAbortedTransaction, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int64() + s.ProducerID = v + } + { + v := b.Int64() + s.FirstOffset = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.AbortedTransactions = v + } + if version >= 11 { + v := b.Int32() + s.PreferredReadReplica = v + } + { + var v []byte + if isFlexible { + v = b.CompactNullableBytes() + } else { + v = b.NullableBytes() + } + s.RecordBatches = v + } + if isFlexible { + for i := b.Uvarint(); i > 0; i-- { + switch key := b.Uvarint(); key { + default: + s.UnknownTags.Set(key, b.Span(int(b.Uvarint()))) + case 0: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + v := &s.DivergingEpoch + v.Default() + s := v + { + v := b.Int32() + s.Epoch = v + } + { + v := b.Int64() + s.EndOffset = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + if err := b.Complete(); err != nil { + return err + } + case 1: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + v := &s.CurrentLeader + v.Default() + s := v + { + v := b.Int32() + s.LeaderID = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + if err := b.Complete(); err != nil { + return err + } + case 2: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + v := &s.SnapshotID + v.Default() + s := v + { + v := b.Int64() + s.EndOffset = v + } + { + v := b.Int32() + s.Epoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + if err := b.Complete(); err != nil { + return err + } + } + } + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + for i := b.Uvarint(); i > 0; i-- { + switch key := b.Uvarint(); key { + default: + s.UnknownTags.Set(key, b.Span(int(b.Uvarint()))) + case 0: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + v := s.Brokers + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]FetchResponseBroker, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.NodeID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Host = v + } + { + v := b.Int32() + s.Port = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Rack = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Brokers = v + if err := b.Complete(); err != nil { + return err + } + } + } + } + return b.Complete() +} + +// NewPtrFetchResponse returns a pointer to a default FetchResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrFetchResponse() *FetchResponse { + var v FetchResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchResponse. +func (v *FetchResponse) Default() { +} + +// NewFetchResponse returns a default FetchResponse +// This is a shortcut for creating a struct and calling Default yourself. 
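Editor's note (not part of the diff): the generated FetchResponse above is decoded the same way this PR's handleRequest decodes Produce/Fetch requests with kmsg. A minimal sketch follows; the helper name inspectFetchErrors and the version/body parameters are illustrative assumptions, with body meaning the response bytes after the response header (correlation ID and, for flexible versions, header tags) has been stripped.

// Sketch only; assumes imports "fmt", "github.com/sirupsen/logrus",
// and "github.com/twmb/franz-go/pkg/kmsg".
func inspectFetchErrors(version int16, body []byte) error {
	resp := kmsg.NewPtrFetchResponse()
	resp.SetVersion(version) // readFrom branches on Version, so set it before decoding
	if err := resp.ReadFrom(body); err != nil {
		return fmt.Errorf("decode fetch response: %w", err)
	}
	for _, t := range resp.Topics {
		// Note: for v13+ the topic name is empty and TopicID identifies the topic.
		for _, p := range t.Partitions {
			if p.ErrorCode != 0 {
				logrus.Infof("fetch error on %s[%d]: error code %d", t.Topic, p.Partition, p.ErrorCode)
			}
		}
	}
	return nil
}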
+func NewFetchResponse() FetchResponse { + var v FetchResponse + v.Default() + return v +} + +type ListOffsetsRequestTopicPartition struct { + // Partition is a partition of a topic to get offsets for. + Partition int32 + + // CurrentLeaderEpoch, proposed in KIP-320 and introduced in Kafka 2.1.0, + // allows brokers to check if the client is fenced (has an out of date + // leader) or is using an unknown leader. + // + // The initial leader epoch can be determined from a MetadataResponse. + // To skip log truncation checking, use -1. + // + // This field has a default of -1. + CurrentLeaderEpoch int32 // v4+ + + // Timestamp controls which offset to return in a response for this + // partition. + // + // The offset returned will be the one of the message whose timestamp is + // the first timestamp greater than or equal to this requested timestamp. + // + // If no such message is found, then no offset is returned (-1). + // + // There exist two special timestamps: -2 corresponds to the earliest + // timestamp, and -1 corresponds to the latest. + // + // If you are talking to Kafka 3.0+, there exists an additional special + // timestamp -3 that returns the latest timestamp produced so far and its + // corresponding offset. This is subtly different from the latest offset, + // because timestamps are client-side generated. More importantly though, + // because this returns the latest produced timestamp, this can be used + // to determine topic "liveness" (when was the last produce?). + // Previously, this was not easy to determine. See KIP-734 for more + // detail. + // + // If you are talking to Kafka 3.4+ and using request version 8+ (for + // KIP-405), the new special timestamp -4 returns the local log start + // offset. In the context of tiered storage, the earliest local log start + // offset is the offset actually available on disk on the broker. + Timestamp int64 + + // MaxNumOffsets is the maximum number of offsets to report. + // This was removed after v0. + // + // This field has a default of 1. + MaxNumOffsets int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListOffsetsRequestTopicPartition. +func (v *ListOffsetsRequestTopicPartition) Default() { + v.CurrentLeaderEpoch = -1 + v.MaxNumOffsets = 1 +} + +// NewListOffsetsRequestTopicPartition returns a default ListOffsetsRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewListOffsetsRequestTopicPartition() ListOffsetsRequestTopicPartition { + var v ListOffsetsRequestTopicPartition + v.Default() + return v +} + +type ListOffsetsRequestTopic struct { + // Topic is a topic to get offsets for. + Topic string + + // Partitions is an array of partitions in a topic to get offsets for. + Partitions []ListOffsetsRequestTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListOffsetsRequestTopic. +func (v *ListOffsetsRequestTopic) Default() { +} + +// NewListOffsetsRequestTopic returns a default ListOffsetsRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewListOffsetsRequestTopic() ListOffsetsRequestTopic { + var v ListOffsetsRequestTopic + v.Default() + return v +} + +// ListOffsetsRequest requests partition offsets from Kafka for use in +// consuming records. +// +// Version 5, introduced in Kafka 2.2.0, is the same as version 4. Using +// version 5 implies you support Kafka's OffsetNotAvailableException +// See KIP-207 for details. +// +// Version 7, introduced in Kafka 3.0, supports -3 as a timestamp to return +// the timestamp and offset for the record with the largest timestamp. +// +// Version 8, introduced in Kafka 3.4, supports -4 as a timestamp to return +// the local log start offset (in the context of tiered storage, see KIP-405). +type ListOffsetsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ReplicaID is the broker ID to get offsets from. As a Kafka client, use -1. + // The consumer replica ID (-1) causes requests to only succeed if issued + // against the leader broker. + // + // This field has a default of -1. + ReplicaID int32 + + // IsolationLevel configures which record offsets are visible in the + // response. READ_UNCOMMITTED (0) makes all records visible. READ_COMMITTED + // (1) makes non-transactional and committed transactional records visible. + // READ_COMMITTED means all offsets smaller than the last stable offset and + // includes aborted transactions (allowing consumers to discard aborted + // records). + IsolationLevel int8 // v2+ + + // Topics is an array of topics to get offsets for. + Topics []ListOffsetsRequestTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +func (*ListOffsetsRequest) Key() int16 { return 2 } +func (*ListOffsetsRequest) MaxVersion() int16 { return 8 } +func (v *ListOffsetsRequest) SetVersion(version int16) { v.Version = version } +func (v *ListOffsetsRequest) GetVersion() int16 { return v.Version } +func (v *ListOffsetsRequest) IsFlexible() bool { return v.Version >= 6 } +func (v *ListOffsetsRequest) ResponseKind() Response { + r := &ListOffsetsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
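Editor's note (not part of the diff): the Timestamp semantics documented above (-1 latest, -2 earliest, -3/-4 on newer brokers) are used by filling the request structs directly. A minimal sketch, with the helper name and the topic/partition values as placeholders:

// Sketch only; assumes import "github.com/twmb/franz-go/pkg/kmsg".
func buildLatestOffsetsRequest() []byte {
	req := kmsg.NewPtrListOffsetsRequest()
	req.SetVersion(4) // any supported version; 4+ also carries CurrentLeaderEpoch

	part := kmsg.NewListOffsetsRequestTopicPartition()
	part.Partition = 0
	part.Timestamp = -1 // -1 = latest offset, -2 = earliest (see the field docs above)

	topic := kmsg.NewListOffsetsRequestTopic()
	topic.Topic = "example-topic" // placeholder
	topic.Partitions = append(topic.Partitions, part)
	req.Topics = append(req.Topics, topic)

	// AppendTo serializes only the request body; the Kafka request header
	// (api key 2, version, correlation ID, client ID) is prepended separately.
	return req.AppendTo(nil)
}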
+func (v *ListOffsetsRequest) RequestWith(ctx context.Context, r Requestor) (*ListOffsetsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*ListOffsetsResponse) + return resp, err +} + +func (v *ListOffsetsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + { + v := v.ReplicaID + dst = kbin.AppendInt32(dst, v) + } + if version >= 2 { + v := v.IsolationLevel + dst = kbin.AppendInt8(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + if version >= 4 { + v := v.CurrentLeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Timestamp + dst = kbin.AppendInt64(dst, v) + } + if version >= 0 && version <= 0 { + v := v.MaxNumOffsets + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ListOffsetsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ListOffsetsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ListOffsetsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + s := v + { + v := b.Int32() + s.ReplicaID = v + } + if version >= 2 { + v := b.Int8() + s.IsolationLevel = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ListOffsetsRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ListOffsetsRequestTopicPartition, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + if version >= 4 { + v := b.Int32() + s.CurrentLeaderEpoch = v + } + { + v := b.Int64() + s.Timestamp = v + } + if version >= 0 && version <= 0 { + v := b.Int32() + s.MaxNumOffsets = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrListOffsetsRequest returns a pointer to a default ListOffsetsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrListOffsetsRequest() *ListOffsetsRequest { + var v ListOffsetsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListOffsetsRequest. +func (v *ListOffsetsRequest) Default() { + v.ReplicaID = -1 +} + +// NewListOffsetsRequest returns a default ListOffsetsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewListOffsetsRequest() ListOffsetsRequest { + var v ListOffsetsRequest + v.Default() + return v +} + +type ListOffsetsResponseTopicPartition struct { + // Partition is the partition this array slot is for. + Partition int32 + + // ErrorCode is any error for a topic partition in a ListOffsets request. + // + // TOPIC_AUTHORIZATION_FAILED is returned if the client is not authorized + // to describe the topic. + // + // INVALID_REQUEST is returned if the requested topic partitions had + // contained duplicates. + // + // KAFKA_STORAGE_EXCEPTION is returned if the topic / partition is in + // an offline log directory. + // + // UNSUPPORTED_FOR_MESSAGE_FORMAT is returned if the broker is using + // Kafka 0.10.0 messages and the requested timestamp was not -1 nor -2. + // + // NOT_LEADER_FOR_PARTITION is returned if the broker is not a leader + // for this partition. This means that the client has stale metadata. + // If the request used the debug replica ID, the returned error will + // be REPLICA_NOT_AVAILABLE. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the broker does not know + // of the requested topic or partition. + // + // FENCED_LEADER_EPOCH is returned if the broker has a higher leader + // epoch than what the request sent. + // + // UNKNOWN_LEADER_EPOCH is returned if the request used a leader epoch + // that the broker does not know about. + // + // OFFSET_NOT_AVAILABLE, introduced in Kafka 2.2.0 with produce request + // v5+, is returned when talking to a broker that is a new leader while + // that broker's high water mark catches up. This avoids situations where + // the old broker returned higher offsets than the new broker would. Note + // that if unclean leader election is allowed, you could still run into + // the situation where offsets returned from list offsets requests are + // not monotonically increasing. This error is only returned if the + // request used the consumer replica ID (-1). If the client did not use + // a v5+ list offsets request, LEADER_NOT_AVAILABLE is returned. + // See KIP-207 for more details. + ErrorCode int16 + + // OldStyleOffsets is a list of offsets. This was removed after + // version 0 and, since it is so historic, is undocumented. 
+ OldStyleOffsets []int64 + + // If the request was for the earliest or latest timestamp (-2 or -1), or + // if an offset could not be found after the requested one, this will be -1. + // + // This field has a default of -1. + Timestamp int64 // v1+ + + // Offset is the offset corresponding to the record on or after the + // requested timestamp. If one could not be found, this will be -1. + // + // This field has a default of -1. + Offset int64 // v1+ + + // LeaderEpoch is the leader epoch of the record at this offset, + // or -1 if there was no leader epoch. + // + // This field has a default of -1. + LeaderEpoch int32 // v4+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListOffsetsResponseTopicPartition. +func (v *ListOffsetsResponseTopicPartition) Default() { + v.Timestamp = -1 + v.Offset = -1 + v.LeaderEpoch = -1 +} + +// NewListOffsetsResponseTopicPartition returns a default ListOffsetsResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewListOffsetsResponseTopicPartition() ListOffsetsResponseTopicPartition { + var v ListOffsetsResponseTopicPartition + v.Default() + return v +} + +type ListOffsetsResponseTopic struct { + // Topic is the topic this array slot is for. + Topic string + + // Partitions is an array of partition responses corresponding to + // the requested partitions for a topic. + Partitions []ListOffsetsResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListOffsetsResponseTopic. +func (v *ListOffsetsResponseTopic) Default() { +} + +// NewListOffsetsResponseTopic returns a default ListOffsetsResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewListOffsetsResponseTopic() ListOffsetsResponseTopic { + var v ListOffsetsResponseTopic + v.Default() + return v +} + +// ListOffsetsResponse is returned from a ListOffsetsRequest. +type ListOffsetsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 3. + ThrottleMillis int32 // v2+ + + // Topics is an array of topic / partition responses corresponding to + // the requested topics and partitions. + Topics []ListOffsetsResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
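Editor's note (not part of the diff): once a ListOffsets response body has been read off the wire, the per-partition Offset/Timestamp fields documented here are reached as below. A minimal sketch; the helper name and the version/body parameters are assumptions.

// Sketch only; assumes import "github.com/twmb/franz-go/pkg/kmsg".
func latestOffsets(version int16, body []byte) (map[string]map[int32]int64, error) {
	resp := kmsg.NewPtrListOffsetsResponse()
	resp.SetVersion(version)
	if err := resp.ReadFrom(body); err != nil {
		return nil, err
	}
	offsets := make(map[string]map[int32]int64)
	for _, t := range resp.Topics {
		offsets[t.Topic] = make(map[int32]int64)
		for _, p := range t.Partitions {
			if p.ErrorCode != 0 {
				continue // see the ErrorCode docs above for the possible values
			}
			// Note: v0 responses populate OldStyleOffsets instead of Offset.
			offsets[t.Topic][p.Partition] = p.Offset // -1 if no offset was found
		}
	}
	return offsets, nil
}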
+ UnknownTags Tags // v6+ +} + +func (*ListOffsetsResponse) Key() int16 { return 2 } +func (*ListOffsetsResponse) MaxVersion() int16 { return 8 } +func (v *ListOffsetsResponse) SetVersion(version int16) { v.Version = version } +func (v *ListOffsetsResponse) GetVersion() int16 { return v.Version } +func (v *ListOffsetsResponse) IsFlexible() bool { return v.Version >= 6 } +func (v *ListOffsetsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 3 } +func (v *ListOffsetsResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *ListOffsetsResponse) RequestKind() Request { return &ListOffsetsRequest{Version: v.Version} } + +func (v *ListOffsetsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + if version >= 2 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if version >= 0 && version <= 0 { + v := v.OldStyleOffsets + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt64(dst, v) + } + } + if version >= 1 { + v := v.Timestamp + dst = kbin.AppendInt64(dst, v) + } + if version >= 1 { + v := v.Offset + dst = kbin.AppendInt64(dst, v) + } + if version >= 4 { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ListOffsetsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ListOffsetsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ListOffsetsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + s := v + if version >= 2 { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ListOffsetsResponseTopic, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ListOffsetsResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if version >= 0 && version <= 0 { + v := s.OldStyleOffsets + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int64, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int64() + a[i] = v + } + v = a + s.OldStyleOffsets = v + } + if version >= 1 { + v := b.Int64() + s.Timestamp = v + } + if version >= 1 { + v := b.Int64() + s.Offset = v + } + if version >= 4 { + v := b.Int32() + s.LeaderEpoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrListOffsetsResponse returns a pointer to a default ListOffsetsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrListOffsetsResponse() *ListOffsetsResponse { + var v ListOffsetsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListOffsetsResponse. +func (v *ListOffsetsResponse) Default() { +} + +// NewListOffsetsResponse returns a default ListOffsetsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewListOffsetsResponse() ListOffsetsResponse { + var v ListOffsetsResponse + v.Default() + return v +} + +type MetadataRequestTopic struct { + // The topic ID. Only one of either topic ID or topic name should be used. + // If using the topic name, this should just be the default empty value. + TopicID [16]byte // v10+ + + // Topic is the topic to request metadata for. Version 10 switched this + // from a string to a nullable string; if using a topic ID, this field + // should be null. + Topic *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v9+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to MetadataRequestTopic. +func (v *MetadataRequestTopic) Default() { +} + +// NewMetadataRequestTopic returns a default MetadataRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewMetadataRequestTopic() MetadataRequestTopic { + var v MetadataRequestTopic + v.Default() + return v +} + +// MetadataRequest requests metadata from Kafka. +type MetadataRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Topics is a list of topics to return metadata about. If this is null + // in v1+, all topics are included. If this is empty, no topics are. 
+ // For v0 (= 9 } +func (v *MetadataRequest) ResponseKind() Response { + r := &MetadataResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *MetadataRequest) RequestWith(ctx context.Context, r Requestor) (*MetadataResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*MetadataResponse) + return resp, err +} + +func (v *MetadataRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 9 + _ = isFlexible + { + v := v.Topics + if version >= 1 { + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + } else { + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + } + for i := range v { + v := &v[i] + if version >= 10 { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.Topic + if version < 10 { + var vv string + if v != nil { + vv = *v + } + { + v := vv + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } else { + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 4 { + v := v.AllowAutoTopicCreation + dst = kbin.AppendBool(dst, v) + } + if version >= 8 && version <= 10 { + v := v.IncludeClusterAuthorizedOperations + dst = kbin.AppendBool(dst, v) + } + if version >= 8 { + v := v.IncludeTopicAuthorizedOperations + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *MetadataRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *MetadataRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *MetadataRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 9 + _ = isFlexible + s := v + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 1 || l == 0 { + a = []MetadataRequestTopic{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]MetadataRequestTopic, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + if version >= 10 { + v := b.Uuid() + s.TopicID = v + } + { + var v *string + if version < 10 { + var vv string + if isFlexible { + if unsafe { + vv = b.UnsafeCompactString() + } else { + vv = b.CompactString() + } + } else { + if unsafe { + vv = b.UnsafeString() + } else { + vv = b.String() + } + } + v = &vv + } else { + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + } + s.Topic = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if version >= 4 { + v := b.Bool() + s.AllowAutoTopicCreation = v + } + if version >= 8 && version <= 10 { + v := b.Bool() + s.IncludeClusterAuthorizedOperations = v + } + if version >= 8 { + v := b.Bool() + s.IncludeTopicAuthorizedOperations = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrMetadataRequest returns a pointer to a default MetadataRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrMetadataRequest() *MetadataRequest { + var v MetadataRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to MetadataRequest. +func (v *MetadataRequest) Default() { +} + +// NewMetadataRequest returns a default MetadataRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewMetadataRequest() MetadataRequest { + var v MetadataRequest + v.Default() + return v +} + +type MetadataResponseBroker struct { + // NodeID is the node ID of a Kafka broker. + NodeID int32 + + // Host is the hostname of a Kafka broker. + Host string + + // Port is the port of a Kafka broker. + Port int32 + + // Rack is the rack this Kafka broker is in. + Rack *string // v1+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v9+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to MetadataResponseBroker. +func (v *MetadataResponseBroker) Default() { +} + +// NewMetadataResponseBroker returns a default MetadataResponseBroker +// This is a shortcut for creating a struct and calling Default yourself. +func NewMetadataResponseBroker() MetadataResponseBroker { + var v MetadataResponseBroker + v.Default() + return v +} + +type MetadataResponseTopicPartition struct { + // ErrorCode is any error for a partition in topic metadata. + // + // LEADER_NOT_AVAILABLE is returned if a leader is unavailable for this + // partition. For v0 metadata responses, this is also returned if a + // partition leader's listener does not exist. + // + // LISTENER_NOT_FOUND is returned if a leader ID is known but the + // listener for it is not (v1+). + // + // REPLICA_NOT_AVAILABLE is returned in v0 responses if any replica is + // unavailable. + // + // UNKNOWN_TOPIC_ID is returned if using a topic ID and the ID does not + // exist. + ErrorCode int16 + + // Partition is a partition number for a topic. + Partition int32 + + // Leader is the broker leader for this partition. This will be -1 + // on leader / listener error. + Leader int32 + + // LeaderEpoch, proposed in KIP-320 and introduced in Kafka 2.1.0 is the + // epoch of the broker leader. + // + // This field has a default of -1. 
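Editor's note (not part of the diff), referring back to MetadataRequest above: nil Topics asks for all topics (v1+ semantics), an empty slice asks for none, and each topic carries either a name or a TopicID but not both. A minimal sketch under those assumptions, with hypothetical helper and topic names:

// Sketch only; assumes import "github.com/twmb/franz-go/pkg/kmsg".
func buildMetadataRequests() (all, one *kmsg.MetadataRequest) {
	// nil Topics: request metadata for every topic (v1+ semantics).
	all = kmsg.NewPtrMetadataRequest()
	all.SetVersion(9)
	all.Topics = nil

	// One named topic: set Topic and leave TopicID zeroed, since name and ID
	// are mutually exclusive per the field docs.
	one = kmsg.NewPtrMetadataRequest()
	one.SetVersion(9)
	name := "example-topic" // placeholder
	t := kmsg.NewMetadataRequestTopic()
	t.Topic = &name
	one.Topics = append(one.Topics, t)
	return all, one
}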
+ LeaderEpoch int32 // v7+ + + // Replicas returns all broker IDs containing replicas of this partition. + Replicas []int32 + + // ISR returns all broker IDs of in-sync replicas of this partition. + ISR []int32 + + // OfflineReplicas, proposed in KIP-112 and introduced in Kafka 1.0, + // returns all offline broker IDs that should be replicating this partition. + OfflineReplicas []int32 // v5+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v9+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to MetadataResponseTopicPartition. +func (v *MetadataResponseTopicPartition) Default() { + v.LeaderEpoch = -1 +} + +// NewMetadataResponseTopicPartition returns a default MetadataResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewMetadataResponseTopicPartition() MetadataResponseTopicPartition { + var v MetadataResponseTopicPartition + v.Default() + return v +} + +type MetadataResponseTopic struct { + // ErrorCode is any error for a topic in a metadata request. + // + // TOPIC_AUTHORIZATION_FAILED is returned if the client is not authorized + // to describe the topic, or if the metadata request specified topic auto + // creation, the topic did not exist, and the user lacks permission to create. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if a topic does not exist and + // the request did not specify autocreation. + // + // LEADER_NOT_AVAILABLE is returned if a new topic is created successfully + // (since there is no leader on an immediately new topic). + // + // There can be a myriad of other errors for unsuccessful topic creation. + ErrorCode int16 + + // Topic is the topic this metadata corresponds to. + Topic *string + + // The topic ID. + TopicID [16]byte // v10+ + + // IsInternal signifies whether this topic is a Kafka internal topic. + IsInternal bool // v1+ + + // Partitions contains metadata about partitions for a topic. + Partitions []MetadataResponseTopicPartition + + // AuthorizedOperations, proposed in KIP-430 and introduced in Kafka 2.3.0, + // is a bitfield (corresponding to AclOperation) containing which operations + // the client is allowed to perform on this topic. + // This is only returned if requested. + // + // This field has a default of -2147483648. + AuthorizedOperations int32 // v8+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v9+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to MetadataResponseTopic. +func (v *MetadataResponseTopic) Default() { + v.AuthorizedOperations = -2147483648 +} + +// NewMetadataResponseTopic returns a default MetadataResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewMetadataResponseTopic() MetadataResponseTopic { + var v MetadataResponseTopic + v.Default() + return v +} + +// MetadataResponse is returned from a MetdataRequest. +type MetadataResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 6. + ThrottleMillis int32 // v3+ + + // Brokers is a set of alive Kafka brokers. 
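Editor's note (not part of the diff): the Brokers list declared just below is the part a broker-address-rewriting proxy such as kafka-proxy ultimately inspects. A minimal sketch of walking it after decoding; the helper name and the version/body parameters are assumptions.

// Sketch only; assumes imports "github.com/sirupsen/logrus" and
// "github.com/twmb/franz-go/pkg/kmsg".
func logBrokers(version int16, body []byte) error {
	resp := kmsg.NewPtrMetadataResponse()
	resp.SetVersion(version)
	if err := resp.ReadFrom(body); err != nil {
		return err
	}
	for _, b := range resp.Brokers {
		rack := ""
		if b.Rack != nil {
			rack = *b.Rack
		}
		logrus.Infof("broker %d at %s:%d (rack %q)", b.NodeID, b.Host, b.Port, rack)
	}
	return nil
}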
+ Brokers []MetadataResponseBroker + + // ClusterID, proposed in KIP-78 and introduced in Kafka 0.10.1.0, is a + // unique string specifying the cluster that the replying Kafka belongs to. + ClusterID *string // v2+ + + // ControllerID is the ID of the controller broker (the admin broker). + // + // This field has a default of -1. + ControllerID int32 // v1+ + + // Topics contains metadata about each topic requested in the + // MetadataRequest. + Topics []MetadataResponseTopic + + // AuthorizedOperations is a bitfield containing which operations the client + // is allowed to perform on this cluster. + // + // This field has a default of -2147483648. + AuthorizedOperations int32 // v8-v10 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v9+ +} + +func (*MetadataResponse) Key() int16 { return 3 } +func (*MetadataResponse) MaxVersion() int16 { return 12 } +func (v *MetadataResponse) SetVersion(version int16) { v.Version = version } +func (v *MetadataResponse) GetVersion() int16 { return v.Version } +func (v *MetadataResponse) IsFlexible() bool { return v.Version >= 9 } +func (v *MetadataResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 6 } +func (v *MetadataResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *MetadataResponse) RequestKind() Request { return &MetadataRequest{Version: v.Version} } + +func (v *MetadataResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 9 + _ = isFlexible + if version >= 3 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Brokers + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.NodeID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Host + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Port + dst = kbin.AppendInt32(dst, v) + } + if version >= 1 { + v := v.Rack + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 2 { + v := v.ClusterID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 1 { + v := v.ControllerID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Topic + if version < 12 { + var vv string + if v != nil { + vv = *v + } + { + v := vv + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } else { + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + } + if version >= 10 { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + if version >= 1 { + v := v.IsInternal + dst = kbin.AppendBool(dst, v) + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ErrorCode + 
dst = kbin.AppendInt16(dst, v) + } + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Leader + dst = kbin.AppendInt32(dst, v) + } + if version >= 7 { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Replicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + { + v := v.ISR + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if version >= 5 { + v := v.OfflineReplicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 8 { + v := v.AuthorizedOperations + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 8 && version <= 10 { + v := v.AuthorizedOperations + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *MetadataResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *MetadataResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *MetadataResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 9 + _ = isFlexible + s := v + if version >= 3 { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Brokers + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]MetadataResponseBroker, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.NodeID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Host = v + } + { + v := b.Int32() + s.Port = v + } + if version >= 1 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Rack = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Brokers = v + } + if version >= 2 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ClusterID = v + } + if version >= 1 { + v := b.Int32() + s.ControllerID = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]MetadataResponseTopic, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if version < 12 { + var vv string + if isFlexible { + if unsafe { + vv = b.UnsafeCompactString() + } else { + vv = b.CompactString() + } + } else { + if unsafe { + vv = b.UnsafeString() + } else { + vv = b.String() + } + } + v = &vv + } else { + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + } + s.Topic = v + } + if version >= 10 { + v := b.Uuid() + s.TopicID = v + } + if version >= 1 { + v := b.Bool() + s.IsInternal = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]MetadataResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int32() + s.Leader = v + } + if version >= 7 { + v := b.Int32() + s.LeaderEpoch = v + } + { + v := s.Replicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Replicas = v + } + { + v := s.ISR + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.ISR = v + } + if version >= 5 { + v := s.OfflineReplicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.OfflineReplicas = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if version >= 8 { + v := b.Int32() + s.AuthorizedOperations = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if version >= 8 && version <= 10 { + v := b.Int32() + s.AuthorizedOperations = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrMetadataResponse returns a pointer to a default MetadataResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrMetadataResponse() *MetadataResponse { + var v MetadataResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to MetadataResponse. +func (v *MetadataResponse) Default() { + v.ControllerID = -1 + v.AuthorizedOperations = -2147483648 +} + +// NewMetadataResponse returns a default MetadataResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewMetadataResponse() MetadataResponse { + var v MetadataResponse + v.Default() + return v +} + +// LeaderAndISRRequestTopicPartition is a common struct that is used across +// different versions of LeaderAndISRRequest. 
+type LeaderAndISRRequestTopicPartition struct { + Topic string // v0-v1 + + Partition int32 + + ControllerEpoch int32 + + Leader int32 + + LeaderEpoch int32 + + ISR []int32 + + ZKVersion int32 + + Replicas []int32 + + AddingReplicas []int32 // v3+ + + RemovingReplicas []int32 // v3+ + + IsNew bool // v1+ + + LeaderRecoveryState int8 // v6+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to LeaderAndISRRequestTopicPartition. +func (v *LeaderAndISRRequestTopicPartition) Default() { +} + +// NewLeaderAndISRRequestTopicPartition returns a default LeaderAndISRRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewLeaderAndISRRequestTopicPartition() LeaderAndISRRequestTopicPartition { + var v LeaderAndISRRequestTopicPartition + v.Default() + return v +} + +// LeaderAndISRResponseTopicPartition is a common struct that is used across +// different versions of LeaderAndISRResponse. +type LeaderAndISRResponseTopicPartition struct { + Topic string // v0-v4 + + Partition int32 + + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to LeaderAndISRResponseTopicPartition. +func (v *LeaderAndISRResponseTopicPartition) Default() { +} + +// NewLeaderAndISRResponseTopicPartition returns a default LeaderAndISRResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewLeaderAndISRResponseTopicPartition() LeaderAndISRResponseTopicPartition { + var v LeaderAndISRResponseTopicPartition + v.Default() + return v +} + +type LeaderAndISRRequestTopicState struct { + Topic string + + TopicID [16]byte // v5+ + + PartitionStates []LeaderAndISRRequestTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to LeaderAndISRRequestTopicState. +func (v *LeaderAndISRRequestTopicState) Default() { +} + +// NewLeaderAndISRRequestTopicState returns a default LeaderAndISRRequestTopicState +// This is a shortcut for creating a struct and calling Default yourself. +func NewLeaderAndISRRequestTopicState() LeaderAndISRRequestTopicState { + var v LeaderAndISRRequestTopicState + v.Default() + return v +} + +type LeaderAndISRRequestLiveLeader struct { + BrokerID int32 + + Host string + + Port int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to LeaderAndISRRequestLiveLeader. +func (v *LeaderAndISRRequestLiveLeader) Default() { +} + +// NewLeaderAndISRRequestLiveLeader returns a default LeaderAndISRRequestLiveLeader +// This is a shortcut for creating a struct and calling Default yourself. +func NewLeaderAndISRRequestLiveLeader() LeaderAndISRRequestLiveLeader { + var v LeaderAndISRRequestLiveLeader + v.Default() + return v +} + +// LeaderAndISRRequest is an advanced request that controller brokers use +// to broadcast state to other brokers. Manually using this request is a +// great way to break your cluster. 
+// +// As this is an advanced request and there is little reason to issue it as a +// client, this request is undocumented. +// +// Kafka 1.0 introduced version 1. Kafka 2.2 introduced version 2, proposed +// in KIP-380, which changed the layout of the struct to be more memory +// efficient. Kafka 2.4.0 introduced version 3 with KIP-455. +// Kafka 3.4 introduced version 7 with KIP-866. +type LeaderAndISRRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + ControllerID int32 + + // If KRaft controller id is used during migration. See KIP-866. + IsKRaftController bool // v7+ + + ControllerEpoch int32 + + // This field has a default of -1. + BrokerEpoch int64 // v2+ + + Type int8 // v5+ + + PartitionStates []LeaderAndISRRequestTopicPartition // v0-v1 + + TopicStates []LeaderAndISRRequestTopicState // v2+ + + LiveLeaders []LeaderAndISRRequestLiveLeader + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +func (*LeaderAndISRRequest) Key() int16 { return 4 } +func (*LeaderAndISRRequest) MaxVersion() int16 { return 7 } +func (v *LeaderAndISRRequest) SetVersion(version int16) { v.Version = version } +func (v *LeaderAndISRRequest) GetVersion() int16 { return v.Version } +func (v *LeaderAndISRRequest) IsFlexible() bool { return v.Version >= 4 } +func (v *LeaderAndISRRequest) ResponseKind() Response { + r := &LeaderAndISRResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *LeaderAndISRRequest) RequestWith(ctx context.Context, r Requestor) (*LeaderAndISRResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*LeaderAndISRResponse) + return resp, err +} + +func (v *LeaderAndISRRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + { + v := v.ControllerID + dst = kbin.AppendInt32(dst, v) + } + if version >= 7 { + v := v.IsKRaftController + dst = kbin.AppendBool(dst, v) + } + { + v := v.ControllerEpoch + dst = kbin.AppendInt32(dst, v) + } + if version >= 2 { + v := v.BrokerEpoch + dst = kbin.AppendInt64(dst, v) + } + if version >= 5 { + v := v.Type + dst = kbin.AppendInt8(dst, v) + } + if version >= 0 && version <= 1 { + v := v.PartitionStates + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + if version >= 0 && version <= 1 { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ControllerEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Leader + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ISR + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + { + v := v.ZKVersion + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Replicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = 
kbin.AppendInt32(dst, v) + } + } + if version >= 3 { + v := v.AddingReplicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if version >= 3 { + v := v.RemovingReplicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if version >= 1 { + v := v.IsNew + dst = kbin.AppendBool(dst, v) + } + if version >= 6 { + v := v.LeaderRecoveryState + dst = kbin.AppendInt8(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 2 { + v := v.TopicStates + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 5 { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.PartitionStates + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + if version >= 0 && version <= 1 { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ControllerEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Leader + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ISR + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + { + v := v.ZKVersion + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Replicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if version >= 3 { + v := v.AddingReplicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if version >= 3 { + v := v.RemovingReplicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if version >= 1 { + v := v.IsNew + dst = kbin.AppendBool(dst, v) + } + if version >= 6 { + v := v.LeaderRecoveryState + dst = kbin.AppendInt8(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.LiveLeaders + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.BrokerID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Host + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + 
} + { + v := v.Port + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *LeaderAndISRRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *LeaderAndISRRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *LeaderAndISRRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + s := v + { + v := b.Int32() + s.ControllerID = v + } + if version >= 7 { + v := b.Bool() + s.IsKRaftController = v + } + { + v := b.Int32() + s.ControllerEpoch = v + } + if version >= 2 { + v := b.Int64() + s.BrokerEpoch = v + } + if version >= 5 { + v := b.Int8() + s.Type = v + } + if version >= 0 && version <= 1 { + v := s.PartitionStates + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]LeaderAndISRRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + if version >= 0 && version <= 1 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int32() + s.ControllerEpoch = v + } + { + v := b.Int32() + s.Leader = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + { + v := s.ISR + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.ISR = v + } + { + v := b.Int32() + s.ZKVersion = v + } + { + v := s.Replicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Replicas = v + } + if version >= 3 { + v := s.AddingReplicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.AddingReplicas = v + } + if version >= 3 { + v := s.RemovingReplicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.RemovingReplicas = v + } + if version >= 1 { + v := b.Bool() + s.IsNew = v + } + if version >= 6 { + v := b.Int8() + s.LeaderRecoveryState = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.PartitionStates = v + } + if version >= 2 { + v := s.TopicStates + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]LeaderAndISRRequestTopicState, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + if version >= 5 { + v := b.Uuid() + s.TopicID = v + } + { + v := s.PartitionStates + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]LeaderAndISRRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + if version >= 0 && version <= 1 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int32() + s.ControllerEpoch = v + } + { + v := b.Int32() + s.Leader = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + { + v := s.ISR + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.ISR = v + } + { + v := b.Int32() + s.ZKVersion = v + } + { + v := s.Replicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Replicas = v + } + if version >= 3 { + v := s.AddingReplicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.AddingReplicas = v + } + if version >= 3 { + v := s.RemovingReplicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.RemovingReplicas = v + } + if version >= 1 { + v := b.Bool() + s.IsNew = v + } + if version >= 6 { + v := b.Int8() + s.LeaderRecoveryState = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.PartitionStates = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.TopicStates = v + } + { + v := s.LiveLeaders + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]LeaderAndISRRequestLiveLeader, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.BrokerID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Host = v + } + { + v := b.Int32() + s.Port = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.LiveLeaders = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrLeaderAndISRRequest returns a pointer to a default LeaderAndISRRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrLeaderAndISRRequest() *LeaderAndISRRequest { + var v LeaderAndISRRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to LeaderAndISRRequest. +func (v *LeaderAndISRRequest) Default() { + v.BrokerEpoch = -1 +} + +// NewLeaderAndISRRequest returns a default LeaderAndISRRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewLeaderAndISRRequest() LeaderAndISRRequest { + var v LeaderAndISRRequest + v.Default() + return v +} + +type LeaderAndISRResponseTopic struct { + TopicID [16]byte + + Partitions []LeaderAndISRResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to LeaderAndISRResponseTopic. +func (v *LeaderAndISRResponseTopic) Default() { +} + +// NewLeaderAndISRResponseTopic returns a default LeaderAndISRResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewLeaderAndISRResponseTopic() LeaderAndISRResponseTopic { + var v LeaderAndISRResponseTopic + v.Default() + return v +} + +// LeaderAndISRResponse is returned from a LeaderAndISRRequest. +type LeaderAndISRResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + ErrorCode int16 + + Partitions []LeaderAndISRResponseTopicPartition // v0-v4 + + Topics []LeaderAndISRResponseTopic // v5+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v4+ +} + +func (*LeaderAndISRResponse) Key() int16 { return 4 } +func (*LeaderAndISRResponse) MaxVersion() int16 { return 7 } +func (v *LeaderAndISRResponse) SetVersion(version int16) { v.Version = version } +func (v *LeaderAndISRResponse) GetVersion() int16 { return v.Version } +func (v *LeaderAndISRResponse) IsFlexible() bool { return v.Version >= 4 } +func (v *LeaderAndISRResponse) RequestKind() Request { return &LeaderAndISRRequest{Version: v.Version} } + +func (v *LeaderAndISRResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if version >= 0 && version <= 4 { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + if version >= 0 && version <= 4 { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 5 { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + if version >= 0 && version <= 4 { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *LeaderAndISRResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *LeaderAndISRResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *LeaderAndISRResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + s := v + { + v := b.Int16() + s.ErrorCode = v + } + if version >= 0 && version <= 4 { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]LeaderAndISRResponseTopicPartition, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + if version >= 0 && version <= 4 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if version >= 5 { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]LeaderAndISRResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Uuid() + s.TopicID = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]LeaderAndISRResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + if version >= 0 && version <= 4 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrLeaderAndISRResponse returns a pointer to a default LeaderAndISRResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrLeaderAndISRResponse() *LeaderAndISRResponse { + var v LeaderAndISRResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to LeaderAndISRResponse. +func (v *LeaderAndISRResponse) Default() { +} + +// NewLeaderAndISRResponse returns a default LeaderAndISRResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewLeaderAndISRResponse() LeaderAndISRResponse { + var v LeaderAndISRResponse + v.Default() + return v +} + +type StopReplicaRequestTopicPartitionState struct { + Partition int32 + + // This field has a default of -1. + LeaderEpoch int32 + + Delete bool + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to StopReplicaRequestTopicPartitionState. +func (v *StopReplicaRequestTopicPartitionState) Default() { + v.LeaderEpoch = -1 +} + +// NewStopReplicaRequestTopicPartitionState returns a default StopReplicaRequestTopicPartitionState +// This is a shortcut for creating a struct and calling Default yourself. 
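Aside (editor's sketch, not part of the vendored file): every generated kmsg type in this diff follows the same lifecycle. You construct it with New*/NewPtr*, pin a protocol version with SetVersion, serialize the message body with AppendTo, and decode with ReadFrom on a value carrying the same Version. A minimal round-trip using the LeaderAndISRResponse defined above, with arbitrary values, could look like this:

package main

import (
	"fmt"

	"github.com/twmb/franz-go/pkg/kmsg"
)

func main() {
	// Build a response at a fixed version; v4+ switches to the flexible (compact) encoding.
	resp := kmsg.NewPtrLeaderAndISRResponse()
	resp.SetVersion(5)
	resp.Topics = []kmsg.LeaderAndISRResponseTopic{{
		Partitions: []kmsg.LeaderAndISRResponseTopicPartition{{Partition: 0}},
	}}

	// AppendTo emits only the message body, not the length prefix or the
	// header that surrounds it on the wire.
	wire := resp.AppendTo(nil)

	// ReadFrom relies on Version to pick the same field layout.
	decoded := kmsg.NewPtrLeaderAndISRResponse()
	decoded.SetVersion(5)
	if err := decoded.ReadFrom(wire); err != nil {
		panic(err)
	}
	fmt.Println(decoded.Topics[0].Partitions[0].Partition) // 0
}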
+func NewStopReplicaRequestTopicPartitionState() StopReplicaRequestTopicPartitionState { + var v StopReplicaRequestTopicPartitionState + v.Default() + return v +} + +type StopReplicaRequestTopic struct { + Topic string + + Partition int32 + + Partitions []int32 // v1-v2 + + PartitionStates []StopReplicaRequestTopicPartitionState // v3+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to StopReplicaRequestTopic. +func (v *StopReplicaRequestTopic) Default() { +} + +// NewStopReplicaRequestTopic returns a default StopReplicaRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewStopReplicaRequestTopic() StopReplicaRequestTopic { + var v StopReplicaRequestTopic + v.Default() + return v +} + +// StopReplicaRequest is an advanced request that brokers use to stop replicas. +// +// As this is an advanced request and there is little reason to issue it as a +// client, this request is undocumented. +// +// Kafka 2.2 introduced version 1, proposed in KIP-380, which changed the +// layout of the struct to be more memory efficient. +// +// Kafka 2.6 introduced version 3, proposed in KIP-570, reorganizes partitions +// to be stored and adds the leader epoch and delete partition fields per partition. +// Kafka 3.4 introduced version 4 with KIP-866. +type StopReplicaRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + ControllerID int32 + + ControllerEpoch int32 + + // If KRaft controller id is used during migration. See KIP-866. + IsKRaftController bool // v4+ + + // This field has a default of -1. + BrokerEpoch int64 // v1+ + + DeletePartitions bool // v0-v2 + + Topics []StopReplicaRequestTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*StopReplicaRequest) Key() int16 { return 5 } +func (*StopReplicaRequest) MaxVersion() int16 { return 4 } +func (v *StopReplicaRequest) SetVersion(version int16) { v.Version = version } +func (v *StopReplicaRequest) GetVersion() int16 { return v.Version } +func (v *StopReplicaRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *StopReplicaRequest) ResponseKind() Response { + r := &StopReplicaResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
+func (v *StopReplicaRequest) RequestWith(ctx context.Context, r Requestor) (*StopReplicaResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*StopReplicaResponse) + return resp, err +} + +func (v *StopReplicaRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ControllerID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ControllerEpoch + dst = kbin.AppendInt32(dst, v) + } + if version >= 4 { + v := v.IsKRaftController + dst = kbin.AppendBool(dst, v) + } + if version >= 1 { + v := v.BrokerEpoch + dst = kbin.AppendInt64(dst, v) + } + if version >= 0 && version <= 2 { + v := v.DeletePartitions + dst = kbin.AppendBool(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 0 && version <= 0 { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + if version >= 1 && version <= 2 { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if version >= 3 { + v := v.PartitionStates + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Delete + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *StopReplicaRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *StopReplicaRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *StopReplicaRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int32() + s.ControllerID = v + } + { + v := b.Int32() + s.ControllerEpoch = v + } + if version >= 4 { + v := b.Bool() + s.IsKRaftController = v + } + if version >= 1 { + v := b.Int64() + s.BrokerEpoch = v + } + if version >= 0 && version <= 2 { + v := b.Bool() + s.DeletePartitions = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]StopReplicaRequestTopic, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + if version >= 0 && version <= 0 { + v := b.Int32() + s.Partition = v + } + if version >= 1 && version <= 2 { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if version >= 3 { + v := s.PartitionStates + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]StopReplicaRequestTopicPartitionState, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + { + v := b.Bool() + s.Delete = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.PartitionStates = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrStopReplicaRequest returns a pointer to a default StopReplicaRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrStopReplicaRequest() *StopReplicaRequest { + var v StopReplicaRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to StopReplicaRequest. +func (v *StopReplicaRequest) Default() { + v.BrokerEpoch = -1 +} + +// NewStopReplicaRequest returns a default StopReplicaRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewStopReplicaRequest() StopReplicaRequest { + var v StopReplicaRequest + v.Default() + return v +} + +type StopReplicaResponsePartition struct { + Topic string + + Partition int32 + + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to StopReplicaResponsePartition. +func (v *StopReplicaResponsePartition) Default() { +} + +// NewStopReplicaResponsePartition returns a default StopReplicaResponsePartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewStopReplicaResponsePartition() StopReplicaResponsePartition { + var v StopReplicaResponsePartition + v.Default() + return v +} + +// StopReplicasResponse is returned from a StopReplicasRequest. +type StopReplicaResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Version 3 returns FENCED_LEADER_EPOCH if the leader is stale (KIP-570). + ErrorCode int16 + + Partitions []StopReplicaResponsePartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v2+ +} + +func (*StopReplicaResponse) Key() int16 { return 5 } +func (*StopReplicaResponse) MaxVersion() int16 { return 4 } +func (v *StopReplicaResponse) SetVersion(version int16) { v.Version = version } +func (v *StopReplicaResponse) GetVersion() int16 { return v.Version } +func (v *StopReplicaResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *StopReplicaResponse) RequestKind() Request { return &StopReplicaRequest{Version: v.Version} } + +func (v *StopReplicaResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *StopReplicaResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *StopReplicaResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *StopReplicaResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]StopReplicaResponsePartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrStopReplicaResponse returns a pointer to a default StopReplicaResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrStopReplicaResponse() *StopReplicaResponse { + var v StopReplicaResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to StopReplicaResponse. +func (v *StopReplicaResponse) Default() { +} + +// NewStopReplicaResponse returns a default StopReplicaResponse +// This is a shortcut for creating a struct and calling Default yourself. 
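Aside (editor's sketch, not part of the vendored file): the version tags on StopReplicaRequestTopic mean the same struct carries three topic layouts, Partition for v0, Partitions for v1-v2, and PartitionStates for v3+, and AppendTo/readFrom select the right one from Version. A hypothetical v3 request with a per-partition delete flag, using made-up values and a made-up topic name, might be filled in like this:

package main

import "github.com/twmb/franz-go/pkg/kmsg"

func main() {
	req := kmsg.NewPtrStopReplicaRequest() // Default() leaves BrokerEpoch at -1
	req.SetVersion(3)                      // v2+ is flexible; v3+ uses PartitionStates
	req.ControllerID = 1
	req.ControllerEpoch = 10
	req.BrokerEpoch = 42 // only encoded for v1+
	req.Topics = []kmsg.StopReplicaRequestTopic{{
		Topic: "orders", // hypothetical topic name
		PartitionStates: []kmsg.StopReplicaRequestTopicPartitionState{{
			Partition:   0,
			LeaderEpoch: 7,
			Delete:      true, // per-partition delete, replacing the v0-v2 DeletePartitions flag
		}},
	}}

	// Only the fields valid for Version 3 are written; the v0 Partition and
	// v1-v2 Partitions fields are skipped by AppendTo.
	_ = req.AppendTo(nil)
}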
+func NewStopReplicaResponse() StopReplicaResponse { + var v StopReplicaResponse + v.Default() + return v +} + +type UpdateMetadataRequestTopicPartition struct { + Topic string // v0-v4 + + Partition int32 + + ControllerEpoch int32 + + Leader int32 + + LeaderEpoch int32 + + ISR []int32 + + ZKVersion int32 + + Replicas []int32 + + OfflineReplicas []int32 // v4+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to UpdateMetadataRequestTopicPartition. +func (v *UpdateMetadataRequestTopicPartition) Default() { +} + +// NewUpdateMetadataRequestTopicPartition returns a default UpdateMetadataRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewUpdateMetadataRequestTopicPartition() UpdateMetadataRequestTopicPartition { + var v UpdateMetadataRequestTopicPartition + v.Default() + return v +} + +type UpdateMetadataRequestTopicState struct { + Topic string + + TopicID [16]byte // v7+ + + PartitionStates []UpdateMetadataRequestTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to UpdateMetadataRequestTopicState. +func (v *UpdateMetadataRequestTopicState) Default() { +} + +// NewUpdateMetadataRequestTopicState returns a default UpdateMetadataRequestTopicState +// This is a shortcut for creating a struct and calling Default yourself. +func NewUpdateMetadataRequestTopicState() UpdateMetadataRequestTopicState { + var v UpdateMetadataRequestTopicState + v.Default() + return v +} + +type UpdateMetadataRequestLiveBrokerEndpoint struct { + Port int32 + + Host string + + ListenerName string // v3+ + + SecurityProtocol int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to UpdateMetadataRequestLiveBrokerEndpoint. +func (v *UpdateMetadataRequestLiveBrokerEndpoint) Default() { +} + +// NewUpdateMetadataRequestLiveBrokerEndpoint returns a default UpdateMetadataRequestLiveBrokerEndpoint +// This is a shortcut for creating a struct and calling Default yourself. +func NewUpdateMetadataRequestLiveBrokerEndpoint() UpdateMetadataRequestLiveBrokerEndpoint { + var v UpdateMetadataRequestLiveBrokerEndpoint + v.Default() + return v +} + +type UpdateMetadataRequestLiveBroker struct { + ID int32 + + Host string + + Port int32 + + Endpoints []UpdateMetadataRequestLiveBrokerEndpoint // v1+ + + Rack *string // v2+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to UpdateMetadataRequestLiveBroker. +func (v *UpdateMetadataRequestLiveBroker) Default() { +} + +// NewUpdateMetadataRequestLiveBroker returns a default UpdateMetadataRequestLiveBroker +// This is a shortcut for creating a struct and calling Default yourself. +func NewUpdateMetadataRequestLiveBroker() UpdateMetadataRequestLiveBroker { + var v UpdateMetadataRequestLiveBroker + v.Default() + return v +} + +// UpdateMetadataRequest is an advanced request that brokers use to +// issue metadata updates to each other. 
+// +// As this is an advanced request and there is little reason to issue it as a +// client, this request is undocumented. +// +// Version 1 changed the layout of the live brokers. +// +// Kafka 2.2 introduced version 5, proposed in KIP-380, which changed the +// layout of the struct to be more memory efficient. +// Kafka 3.4 introduced version 8 with KIP-866. +type UpdateMetadataRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + ControllerID int32 + + // If KRaft controller id is used during migration. See KIP-866. + IsKRaftController bool // v8+ + + ControllerEpoch int32 + + // This field has a default of -1. + BrokerEpoch int64 // v5+ + + PartitionStates []UpdateMetadataRequestTopicPartition // v0-v4 + + TopicStates []UpdateMetadataRequestTopicState // v5+ + + LiveBrokers []UpdateMetadataRequestLiveBroker + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +func (*UpdateMetadataRequest) Key() int16 { return 6 } +func (*UpdateMetadataRequest) MaxVersion() int16 { return 8 } +func (v *UpdateMetadataRequest) SetVersion(version int16) { v.Version = version } +func (v *UpdateMetadataRequest) GetVersion() int16 { return v.Version } +func (v *UpdateMetadataRequest) IsFlexible() bool { return v.Version >= 6 } +func (v *UpdateMetadataRequest) ResponseKind() Response { + r := &UpdateMetadataResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *UpdateMetadataRequest) RequestWith(ctx context.Context, r Requestor) (*UpdateMetadataResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*UpdateMetadataResponse) + return resp, err +} + +func (v *UpdateMetadataRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + { + v := v.ControllerID + dst = kbin.AppendInt32(dst, v) + } + if version >= 8 { + v := v.IsKRaftController + dst = kbin.AppendBool(dst, v) + } + { + v := v.ControllerEpoch + dst = kbin.AppendInt32(dst, v) + } + if version >= 5 { + v := v.BrokerEpoch + dst = kbin.AppendInt64(dst, v) + } + if version >= 0 && version <= 4 { + v := v.PartitionStates + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + if version >= 0 && version <= 4 { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ControllerEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Leader + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ISR + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + { + v := v.ZKVersion + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Replicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if version >= 4 { + v := v.OfflineReplicas + if isFlexible { + 
dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 5 { + v := v.TopicStates + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 7 { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.PartitionStates + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + if version >= 0 && version <= 4 { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ControllerEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Leader + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ISR + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + { + v := v.ZKVersion + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Replicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if version >= 4 { + v := v.OfflineReplicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.LiveBrokers + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ID + dst = kbin.AppendInt32(dst, v) + } + if version >= 0 && version <= 0 { + v := v.Host + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 0 && version <= 0 { + v := v.Port + dst = kbin.AppendInt32(dst, v) + } + if version >= 1 { + v := v.Endpoints + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Port + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Host + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 3 { + v := v.ListenerName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.SecurityProtocol + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 2 { + v := v.Rack + if 
isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *UpdateMetadataRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *UpdateMetadataRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *UpdateMetadataRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + s := v + { + v := b.Int32() + s.ControllerID = v + } + if version >= 8 { + v := b.Bool() + s.IsKRaftController = v + } + { + v := b.Int32() + s.ControllerEpoch = v + } + if version >= 5 { + v := b.Int64() + s.BrokerEpoch = v + } + if version >= 0 && version <= 4 { + v := s.PartitionStates + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]UpdateMetadataRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + if version >= 0 && version <= 4 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int32() + s.ControllerEpoch = v + } + { + v := b.Int32() + s.Leader = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + { + v := s.ISR + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.ISR = v + } + { + v := b.Int32() + s.ZKVersion = v + } + { + v := s.Replicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Replicas = v + } + if version >= 4 { + v := s.OfflineReplicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.OfflineReplicas = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.PartitionStates = v + } + if version >= 5 { + v := s.TopicStates + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]UpdateMetadataRequestTopicState, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + if version >= 7 { + v := b.Uuid() + s.TopicID = v + } + { + v := s.PartitionStates + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]UpdateMetadataRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + if version >= 0 && version <= 4 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int32() + s.ControllerEpoch = v + } + { + v := b.Int32() + s.Leader = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + { + v := s.ISR + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.ISR = v + } + { + v := b.Int32() + s.ZKVersion = v + } + { + v := s.Replicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Replicas = v + } + if version >= 4 { + v := s.OfflineReplicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.OfflineReplicas = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.PartitionStates = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.TopicStates = v + } + { + v := s.LiveBrokers + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]UpdateMetadataRequestLiveBroker, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.ID = v + } + if version >= 0 && version <= 0 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Host = v + } + if version >= 0 && version <= 0 { + v := b.Int32() + s.Port = v + } + if version >= 1 { + v := s.Endpoints + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]UpdateMetadataRequestLiveBrokerEndpoint, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Port = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Host = v + } + if version >= 3 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ListenerName = v + } + { + v := b.Int16() + s.SecurityProtocol = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Endpoints = v + } + if version >= 2 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Rack = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.LiveBrokers = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrUpdateMetadataRequest returns a pointer to a default UpdateMetadataRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrUpdateMetadataRequest() *UpdateMetadataRequest { + var v UpdateMetadataRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to UpdateMetadataRequest. +func (v *UpdateMetadataRequest) Default() { + v.BrokerEpoch = -1 +} + +// NewUpdateMetadataRequest returns a default UpdateMetadataRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewUpdateMetadataRequest() UpdateMetadataRequest { + var v UpdateMetadataRequest + v.Default() + return v +} + +// UpdateMetadataResponses is returned from an UpdateMetadataRequest. +type UpdateMetadataResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v6+ +} + +func (*UpdateMetadataResponse) Key() int16 { return 6 } +func (*UpdateMetadataResponse) MaxVersion() int16 { return 8 } +func (v *UpdateMetadataResponse) SetVersion(version int16) { v.Version = version } +func (v *UpdateMetadataResponse) GetVersion() int16 { return v.Version } +func (v *UpdateMetadataResponse) IsFlexible() bool { return v.Version >= 6 } +func (v *UpdateMetadataResponse) RequestKind() Request { + return &UpdateMetadataRequest{Version: v.Version} +} + +func (v *UpdateMetadataResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *UpdateMetadataResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *UpdateMetadataResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *UpdateMetadataResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + s := v + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrUpdateMetadataResponse returns a pointer to a default UpdateMetadataResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrUpdateMetadataResponse() *UpdateMetadataResponse { + var v UpdateMetadataResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to UpdateMetadataResponse. +func (v *UpdateMetadataResponse) Default() { +} + +// NewUpdateMetadataResponse returns a default UpdateMetadataResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewUpdateMetadataResponse() UpdateMetadataResponse { + var v UpdateMetadataResponse + v.Default() + return v +} + +// ControlledShutdownRequest is an advanced request that can be used to +// sthudown a broker in a controlled manner. +// +// As this is an advanced request and there is little reason to issue it as a +// client, this request is undocumented. However, the minimal amount of fields +// here makes the usage rather obvious. +// +// Kafka 2.2.0 introduced version 2, proposed in KIP-380. +// +// Note that version 0 of this request uses a special encoding format +// where the request does not include the client ID. +type ControlledShutdownRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + BrokerID int32 + + // This field has a default of -1. + BrokerEpoch int64 // v2+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v3+ +} + +func (*ControlledShutdownRequest) Key() int16 { return 7 } +func (*ControlledShutdownRequest) MaxVersion() int16 { return 3 } +func (v *ControlledShutdownRequest) SetVersion(version int16) { v.Version = version } +func (v *ControlledShutdownRequest) GetVersion() int16 { return v.Version } +func (v *ControlledShutdownRequest) IsFlexible() bool { return v.Version >= 3 } +func (v *ControlledShutdownRequest) ResponseKind() Response { + r := &ControlledShutdownResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *ControlledShutdownRequest) RequestWith(ctx context.Context, r Requestor) (*ControlledShutdownResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*ControlledShutdownResponse) + return resp, err +} + +func (v *ControlledShutdownRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + { + v := v.BrokerID + dst = kbin.AppendInt32(dst, v) + } + if version >= 2 { + v := v.BrokerEpoch + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ControlledShutdownRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ControlledShutdownRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ControlledShutdownRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + { + v := b.Int32() + s.BrokerID = v + } + if version >= 2 { + v := b.Int64() + s.BrokerEpoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrControlledShutdownRequest returns a pointer to a default ControlledShutdownRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrControlledShutdownRequest() *ControlledShutdownRequest { + var v ControlledShutdownRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ControlledShutdownRequest. +func (v *ControlledShutdownRequest) Default() { + v.BrokerEpoch = -1 +} + +// NewControlledShutdownRequest returns a default ControlledShutdownRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewControlledShutdownRequest() ControlledShutdownRequest { + var v ControlledShutdownRequest + v.Default() + return v +} + +type ControlledShutdownResponsePartitionsRemaining struct { + Topic string + + Partition int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ControlledShutdownResponsePartitionsRemaining. +func (v *ControlledShutdownResponsePartitionsRemaining) Default() { +} + +// NewControlledShutdownResponsePartitionsRemaining returns a default ControlledShutdownResponsePartitionsRemaining +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewControlledShutdownResponsePartitionsRemaining() ControlledShutdownResponsePartitionsRemaining { + var v ControlledShutdownResponsePartitionsRemaining + v.Default() + return v +} + +// ControlledShutdownResponse is returned from a ControlledShutdownRequest. +type ControlledShutdownResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + ErrorCode int16 + + PartitionsRemaining []ControlledShutdownResponsePartitionsRemaining + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +func (*ControlledShutdownResponse) Key() int16 { return 7 } +func (*ControlledShutdownResponse) MaxVersion() int16 { return 3 } +func (v *ControlledShutdownResponse) SetVersion(version int16) { v.Version = version } +func (v *ControlledShutdownResponse) GetVersion() int16 { return v.Version } +func (v *ControlledShutdownResponse) IsFlexible() bool { return v.Version >= 3 } +func (v *ControlledShutdownResponse) RequestKind() Request { + return &ControlledShutdownRequest{Version: v.Version} +} + +func (v *ControlledShutdownResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.PartitionsRemaining + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ControlledShutdownResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ControlledShutdownResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ControlledShutdownResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + v := s.PartitionsRemaining + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ControlledShutdownResponsePartitionsRemaining, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := b.Int32() + s.Partition = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.PartitionsRemaining = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrControlledShutdownResponse returns a pointer to a default ControlledShutdownResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. 
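Aside (editor's sketch, not part of the vendored file): because ControlledShutdownRequest has so few fields, it is a convenient place to see exactly what AppendTo emits; at the flexible v3 the body is just BrokerID, BrokerEpoch, and an empty tag section. The values below are arbitrary:

package main

import (
	"fmt"

	"github.com/twmb/franz-go/pkg/kmsg"
)

func main() {
	req := kmsg.NewPtrControlledShutdownRequest() // Default() sets BrokerEpoch = -1
	req.SetVersion(3)                             // v3 is the flexible version
	req.BrokerID = 2
	req.BrokerEpoch = 100 // encoded for v2+

	body := req.AppendTo(nil)
	// 4 bytes BrokerID + 8 bytes BrokerEpoch + 1 byte for the empty tag section.
	fmt.Println(len(body)) // 13
}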
+func NewPtrControlledShutdownResponse() *ControlledShutdownResponse { + var v ControlledShutdownResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ControlledShutdownResponse. +func (v *ControlledShutdownResponse) Default() { +} + +// NewControlledShutdownResponse returns a default ControlledShutdownResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewControlledShutdownResponse() ControlledShutdownResponse { + var v ControlledShutdownResponse + v.Default() + return v +} + +type OffsetCommitRequestTopicPartition struct { + // Partition if a partition to commit offsets for. + Partition int32 + + // Offset is an offset to commit. + Offset int64 + + // Timestamp is the first iteration of tracking how long offset commits + // should persist in Kafka. This field only existed for v1. + // The expiration would be timestamp + offset.retention.minutes, or, if + // timestamp was zero, current time + offset.retention.minutes. + // + // This field has a default of -1. + Timestamp int64 // v1-v1 + + // LeaderEpoch, proposed in KIP-320 and introduced in Kafka 2.1.0, + // is the leader epoch of the record this request is committing. + // + // The initial leader epoch can be determined from a MetadataResponse. + // To skip log truncation checking, use -1. + // + // This field has a default of -1. + LeaderEpoch int32 // v6+ + + // Metadata is optional data to include with committing the offset. This + // can contain information such as which node is doing the committing, etc. + Metadata *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v8+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetCommitRequestTopicPartition. +func (v *OffsetCommitRequestTopicPartition) Default() { + v.Timestamp = -1 + v.LeaderEpoch = -1 +} + +// NewOffsetCommitRequestTopicPartition returns a default OffsetCommitRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetCommitRequestTopicPartition() OffsetCommitRequestTopicPartition { + var v OffsetCommitRequestTopicPartition + v.Default() + return v +} + +type OffsetCommitRequestTopic struct { + // Topic is a topic to commit offsets for. + Topic string + + // Partitions contains partitions in a topic for which to commit offsets. + Partitions []OffsetCommitRequestTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v8+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetCommitRequestTopic. +func (v *OffsetCommitRequestTopic) Default() { +} + +// NewOffsetCommitRequestTopic returns a default OffsetCommitRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetCommitRequestTopic() OffsetCommitRequestTopic { + var v OffsetCommitRequestTopic + v.Default() + return v +} + +// OffsetCommitRequest commits offsets for consumed topics / partitions in +// a group. +type OffsetCommitRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Group is the group this request is committing offsets to. + Group string + + // Generation being -1 and group being empty means the group is being used + // to store offsets only. 
No generation validation, no rebalancing. + // + // This field has a default of -1. + Generation int32 // v1+ + + // MemberID is the ID of the client issuing this request in the group. + MemberID string // v1+ + + // InstanceID is the instance ID of this member in the group (KIP-345). + InstanceID *string // v7+ + + // RetentionTimeMillis is how long this commit will persist in Kafka. + // + // This was introduced in v2, replacing an individual topic/partition's + // Timestamp from v1, and was removed in v5 with Kafka 2.1.0. + // + // This was removed because rarely committing consumers could have their + // offsets expired before committing, even though the consumer was still + // active. After restarting or rebalancing, the consumer would now not know + // the last committed offset and would have to start at the beginning or end, + // leading to duplicates or log loss. + // + // Post 2.1.0, if this field is empty, offsets are only deleted once the + // group is empty. Read KIP-211 for more details. + // + // This field has a default of -1. + RetentionTimeMillis int64 // v2-v4 + + // Topics is contains topics and partitions for which to commit offsets. + Topics []OffsetCommitRequestTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v8+ +} + +func (*OffsetCommitRequest) Key() int16 { return 8 } +func (*OffsetCommitRequest) MaxVersion() int16 { return 9 } +func (v *OffsetCommitRequest) SetVersion(version int16) { v.Version = version } +func (v *OffsetCommitRequest) GetVersion() int16 { return v.Version } +func (v *OffsetCommitRequest) IsFlexible() bool { return v.Version >= 8 } +func (v *OffsetCommitRequest) IsGroupCoordinatorRequest() {} +func (v *OffsetCommitRequest) ResponseKind() Response { + r := &OffsetCommitResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
+func (v *OffsetCommitRequest) RequestWith(ctx context.Context, r Requestor) (*OffsetCommitResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*OffsetCommitResponse) + return resp, err +} + +func (v *OffsetCommitRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 8 + _ = isFlexible + { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 1 { + v := v.Generation + dst = kbin.AppendInt32(dst, v) + } + if version >= 1 { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 7 { + v := v.InstanceID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 2 && version <= 4 { + v := v.RetentionTimeMillis + dst = kbin.AppendInt64(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Offset + dst = kbin.AppendInt64(dst, v) + } + if version >= 1 && version <= 1 { + v := v.Timestamp + dst = kbin.AppendInt64(dst, v) + } + if version >= 6 { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Metadata + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *OffsetCommitRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *OffsetCommitRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *OffsetCommitRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 8 + _ = isFlexible + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Group = v + } + if version >= 1 { + v := b.Int32() + s.Generation = v + } + if version >= 1 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.MemberID = v + } + if version >= 7 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.InstanceID = v + } + if version >= 2 && version 
<= 4 { + v := b.Int64() + s.RetentionTimeMillis = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetCommitRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetCommitRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int64() + s.Offset = v + } + if version >= 1 && version <= 1 { + v := b.Int64() + s.Timestamp = v + } + if version >= 6 { + v := b.Int32() + s.LeaderEpoch = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Metadata = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrOffsetCommitRequest returns a pointer to a default OffsetCommitRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrOffsetCommitRequest() *OffsetCommitRequest { + var v OffsetCommitRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetCommitRequest. +func (v *OffsetCommitRequest) Default() { + v.Generation = -1 + v.RetentionTimeMillis = -1 +} + +// NewOffsetCommitRequest returns a default OffsetCommitRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetCommitRequest() OffsetCommitRequest { + var v OffsetCommitRequest + v.Default() + return v +} + +type OffsetCommitResponseTopicPartition struct { + // Partition is the partition in a topic this array slot corresponds to. + Partition int32 + + // ErrorCode is the error for this partition response. + // + // GROUP_AUTHORIZATION_FAILED is returned if the client is not authorized + // for the group. + // + // TOPIC_AUTHORIZATION_FAILED is returned if the client is not authorized + // for the topic / partition. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the topic / partition does + // not exist. + // + // OFFSET_METADATA_TOO_LARGE is returned if the request metadata is + // larger than the brokers offset.metadata.max.bytes. + // + // INVALID_GROUP_ID is returned in the requested group ID is invalid. + // + // COORDINATOR_NOT_AVAILABLE is returned if the coordinator is not available + // (due to the requested broker shutting down or it has not completed startup). + // + // COORDINATOR_LOAD_IN_PROGRESS is returned if the group is loading. + // + // NOT_COORDINATOR is returned if the requested broker is not the coordinator + // for the requested group. 
+ // + // ILLEGAL_GENERATION is returned if the request's generation ID is invalid. + // + // UNKNOWN_MEMBER_ID is returned if the group is dead or the group does not + // know of the request's member ID. + // + // REBALANCE_IN_PROGRESS is returned if the group is finishing a rebalance. + // + // INVALID_COMMIT_OFFSET_SIZE is returned if the offset commit results in + // a record batch that is too large (likely due to large metadata). + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v8+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetCommitResponseTopicPartition. +func (v *OffsetCommitResponseTopicPartition) Default() { +} + +// NewOffsetCommitResponseTopicPartition returns a default OffsetCommitResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetCommitResponseTopicPartition() OffsetCommitResponseTopicPartition { + var v OffsetCommitResponseTopicPartition + v.Default() + return v +} + +type OffsetCommitResponseTopic struct { + // Topic is the topic this offset commit response corresponds to. + Topic string + + // Partitions contains responses for each requested partition in + // a topic. + Partitions []OffsetCommitResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v8+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetCommitResponseTopic. +func (v *OffsetCommitResponseTopic) Default() { +} + +// NewOffsetCommitResponseTopic returns a default OffsetCommitResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetCommitResponseTopic() OffsetCommitResponseTopic { + var v OffsetCommitResponseTopic + v.Default() + return v +} + +// OffsetCommitResponse is returned from an OffsetCommitRequest. +type OffsetCommitResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 4. + ThrottleMillis int32 // v3+ + + // Topics contains responses for each topic / partition in the commit request. + Topics []OffsetCommitResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v8+ +} + +func (*OffsetCommitResponse) Key() int16 { return 8 } +func (*OffsetCommitResponse) MaxVersion() int16 { return 9 } +func (v *OffsetCommitResponse) SetVersion(version int16) { v.Version = version } +func (v *OffsetCommitResponse) GetVersion() int16 { return v.Version } +func (v *OffsetCommitResponse) IsFlexible() bool { return v.Version >= 8 } +func (v *OffsetCommitResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 4 } +func (v *OffsetCommitResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *OffsetCommitResponse) RequestKind() Request { return &OffsetCommitRequest{Version: v.Version} } + +func (v *OffsetCommitResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 8 + _ = isFlexible + if version >= 3 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *OffsetCommitResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *OffsetCommitResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *OffsetCommitResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 8 + _ = isFlexible + s := v + if version >= 3 { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetCommitResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetCommitResponseTopicPartition, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrOffsetCommitResponse returns a pointer to a default OffsetCommitResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrOffsetCommitResponse() *OffsetCommitResponse { + var v OffsetCommitResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetCommitResponse. +func (v *OffsetCommitResponse) Default() { +} + +// NewOffsetCommitResponse returns a default OffsetCommitResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetCommitResponse() OffsetCommitResponse { + var v OffsetCommitResponse + v.Default() + return v +} + +type OffsetFetchRequestTopic struct { + // Topic is a topic to fetch offsets for. + Topic string + + // Partitions in a list of partitions in a group to fetch offsets for. + Partitions []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetFetchRequestTopic. +func (v *OffsetFetchRequestTopic) Default() { +} + +// NewOffsetFetchRequestTopic returns a default OffsetFetchRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetFetchRequestTopic() OffsetFetchRequestTopic { + var v OffsetFetchRequestTopic + v.Default() + return v +} + +type OffsetFetchRequestGroupTopic struct { + Topic string + + Partitions []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetFetchRequestGroupTopic. +func (v *OffsetFetchRequestGroupTopic) Default() { +} + +// NewOffsetFetchRequestGroupTopic returns a default OffsetFetchRequestGroupTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetFetchRequestGroupTopic() OffsetFetchRequestGroupTopic { + var v OffsetFetchRequestGroupTopic + v.Default() + return v +} + +type OffsetFetchRequestGroup struct { + Group string + + // The member ID assigned by the group coordinator if using the new consumer protocol (KIP-848). + MemberID *string // v9+ + + // The member epoch if using the new consumer protocol (KIP-848). + // + // This field has a default of -1. + MemberEpoch int32 // v9+ + + Topics []OffsetFetchRequestGroupTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetFetchRequestGroup. +func (v *OffsetFetchRequestGroup) Default() { + v.MemberEpoch = -1 +} + +// NewOffsetFetchRequestGroup returns a default OffsetFetchRequestGroup +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewOffsetFetchRequestGroup() OffsetFetchRequestGroup { + var v OffsetFetchRequestGroup + v.Default() + return v +} + +// OffsetFetchRequest requests the most recent committed offsets for topic +// partitions in a group. +type OffsetFetchRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Group is the group to fetch offsets for. + Group string // v0-v7 + + // Topics contains topics to fetch offets for. Version 2+ allows this to be + // null to return all topics the client is authorized to describe in the group. + Topics []OffsetFetchRequestTopic // v0-v7 + + // Groups, introduced in v8 (Kafka 3.0), allows for fetching offsets for + // multiple groups at a time. + // + // The fields here mirror the old top level fields on the request, thus they + // are left undocumented. Refer to the top level documentation if necessary. + Groups []OffsetFetchRequestGroup // v8+ + + // RequireStable signifies whether the broker should wait on returning + // unstable offsets, instead setting a retryable error on the relevant + // unstable partitions (UNSTABLE_OFFSET_COMMIT). See KIP-447 for more + // details. + RequireStable bool // v7+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +func (*OffsetFetchRequest) Key() int16 { return 9 } +func (*OffsetFetchRequest) MaxVersion() int16 { return 9 } +func (v *OffsetFetchRequest) SetVersion(version int16) { v.Version = version } +func (v *OffsetFetchRequest) GetVersion() int16 { return v.Version } +func (v *OffsetFetchRequest) IsFlexible() bool { return v.Version >= 6 } +func (v *OffsetFetchRequest) IsGroupCoordinatorRequest() {} +func (v *OffsetFetchRequest) ResponseKind() Response { + r := &OffsetFetchResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
+func (v *OffsetFetchRequest) RequestWith(ctx context.Context, r Requestor) (*OffsetFetchResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*OffsetFetchResponse) + return resp, err +} + +func (v *OffsetFetchRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + if version >= 0 && version <= 7 { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 0 && version <= 7 { + v := v.Topics + if version >= 2 { + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + } else { + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 8 { + v := v.Groups + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 9 { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 9 { + v := v.MemberEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 7 { + v := v.RequireStable + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *OffsetFetchRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *OffsetFetchRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *OffsetFetchRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + s := v + if version >= 0 && version <= 7 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = 
b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Group = v + } + if version >= 0 && version <= 7 { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 2 || l == 0 { + a = []OffsetFetchRequestTopic{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetFetchRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if version >= 8 { + v := s.Groups + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetFetchRequestGroup, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Group = v + } + if version >= 9 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.MemberID = v + } + if version >= 9 { + v := b.Int32() + s.MemberEpoch = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 0 || l == 0 { + a = []OffsetFetchRequestGroupTopic{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetFetchRequestGroupTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Groups = v + } + if version >= 7 { + v := b.Bool() + s.RequireStable = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrOffsetFetchRequest returns a pointer to a default OffsetFetchRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. 
+func NewPtrOffsetFetchRequest() *OffsetFetchRequest { + var v OffsetFetchRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetFetchRequest. +func (v *OffsetFetchRequest) Default() { +} + +// NewOffsetFetchRequest returns a default OffsetFetchRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetFetchRequest() OffsetFetchRequest { + var v OffsetFetchRequest + v.Default() + return v +} + +type OffsetFetchResponseTopicPartition struct { + // Partition is the partition in a topic this array slot corresponds to. + Partition int32 + + // Offset is the most recently committed offset for this topic partition + // in a group. + Offset int64 + + // LeaderEpoch is the leader epoch of the last consumed record. + // + // This was proposed in KIP-320 and introduced in Kafka 2.1.0 and allows + // clients to detect log truncation. See the KIP for more details. + // + // This field has a default of -1. + LeaderEpoch int32 // v5+ + + // Metadata is client provided metadata corresponding to the offset commit. + // This can be useful for adding who made the commit, etc. + Metadata *string + + // ErrorCode is the error for this partition response. + // + // GROUP_AUTHORIZATION_FAILED is returned if the client is not authorized + // to the group. + // + // INVALID_GROUP_ID is returned in the requested group ID is invalid. + // + // COORDINATOR_NOT_AVAILABLE is returned if the coordinator is not available + // (due to the requested broker shutting down or it has not completed startup). + // + // COORDINATOR_LOAD_IN_PROGRESS is returned if the group is loading. + // + // NOT_COORDINATOR is returned if the requested broker is not the coordinator + // for the requested group. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the requested topic or partition + // is unknown. + // + // UNSTABLE_OFFSET_COMMIT is returned for v7+ if the request set RequireStable. + // See KIP-447 for more details. + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetFetchResponseTopicPartition. +func (v *OffsetFetchResponseTopicPartition) Default() { + v.LeaderEpoch = -1 +} + +// NewOffsetFetchResponseTopicPartition returns a default OffsetFetchResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetFetchResponseTopicPartition() OffsetFetchResponseTopicPartition { + var v OffsetFetchResponseTopicPartition + v.Default() + return v +} + +type OffsetFetchResponseTopic struct { + // Topic is the topic this offset fetch response corresponds to. + Topic string + + // Partitions contains responses for each requested partition in + // a topic. + Partitions []OffsetFetchResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetFetchResponseTopic. +func (v *OffsetFetchResponseTopic) Default() { +} + +// NewOffsetFetchResponseTopic returns a default OffsetFetchResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewOffsetFetchResponseTopic() OffsetFetchResponseTopic { + var v OffsetFetchResponseTopic + v.Default() + return v +} + +type OffsetFetchResponseGroupTopicPartition struct { + Partition int32 + + Offset int64 + + // This field has a default of -1. + LeaderEpoch int32 + + Metadata *string + + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetFetchResponseGroupTopicPartition. +func (v *OffsetFetchResponseGroupTopicPartition) Default() { + v.LeaderEpoch = -1 +} + +// NewOffsetFetchResponseGroupTopicPartition returns a default OffsetFetchResponseGroupTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetFetchResponseGroupTopicPartition() OffsetFetchResponseGroupTopicPartition { + var v OffsetFetchResponseGroupTopicPartition + v.Default() + return v +} + +type OffsetFetchResponseGroupTopic struct { + Topic string + + Partitions []OffsetFetchResponseGroupTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetFetchResponseGroupTopic. +func (v *OffsetFetchResponseGroupTopic) Default() { +} + +// NewOffsetFetchResponseGroupTopic returns a default OffsetFetchResponseGroupTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetFetchResponseGroupTopic() OffsetFetchResponseGroupTopic { + var v OffsetFetchResponseGroupTopic + v.Default() + return v +} + +type OffsetFetchResponseGroup struct { + Group string + + Topics []OffsetFetchResponseGroupTopic + + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetFetchResponseGroup. +func (v *OffsetFetchResponseGroup) Default() { +} + +// NewOffsetFetchResponseGroup returns a default OffsetFetchResponseGroup +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetFetchResponseGroup() OffsetFetchResponseGroup { + var v OffsetFetchResponseGroup + v.Default() + return v +} + +// OffsetFetchResponse is returned from an OffsetFetchRequest. +type OffsetFetchResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 4. + ThrottleMillis int32 // v3+ + + // Topics contains responses for each requested topic/partition. + Topics []OffsetFetchResponseTopic // v0-v7 + + // ErrorCode is a top level error code that applies to all topic/partitions. + // This will be any group error. + ErrorCode int16 // v2-v7 + + // Groups is the response for all groups. Each field mirrors the fields in the + // top level request, thus they are left undocumented. Refer to the top level + // documentation if necessary. + Groups []OffsetFetchResponseGroup // v8+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v6+ +} + +func (*OffsetFetchResponse) Key() int16 { return 9 } +func (*OffsetFetchResponse) MaxVersion() int16 { return 9 } +func (v *OffsetFetchResponse) SetVersion(version int16) { v.Version = version } +func (v *OffsetFetchResponse) GetVersion() int16 { return v.Version } +func (v *OffsetFetchResponse) IsFlexible() bool { return v.Version >= 6 } +func (v *OffsetFetchResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 4 } +func (v *OffsetFetchResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *OffsetFetchResponse) RequestKind() Request { return &OffsetFetchRequest{Version: v.Version} } + +func (v *OffsetFetchResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + if version >= 3 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + if version >= 0 && version <= 7 { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Offset + dst = kbin.AppendInt64(dst, v) + } + if version >= 5 { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Metadata + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 2 && version <= 7 { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if version >= 8 { + v := v.Groups + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Offset + dst = kbin.AppendInt64(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Metadata + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = 
kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *OffsetFetchResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *OffsetFetchResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *OffsetFetchResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + s := v + if version >= 3 { + v := b.Int32() + s.ThrottleMillis = v + } + if version >= 0 && version <= 7 { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetFetchResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetFetchResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int64() + s.Offset = v + } + if version >= 5 { + v := b.Int32() + s.LeaderEpoch = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Metadata = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if version >= 2 && version <= 7 { + v := b.Int16() + s.ErrorCode = v + } + if version >= 8 { + v := s.Groups + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetFetchResponseGroup, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Group = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetFetchResponseGroupTopic, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetFetchResponseGroupTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int64() + s.Offset = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Metadata = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Groups = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrOffsetFetchResponse returns a pointer to a default OffsetFetchResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrOffsetFetchResponse() *OffsetFetchResponse { + var v OffsetFetchResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetFetchResponse. +func (v *OffsetFetchResponse) Default() { +} + +// NewOffsetFetchResponse returns a default OffsetFetchResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetFetchResponse() OffsetFetchResponse { + var v OffsetFetchResponse + v.Default() + return v +} + +// FindCoordinatorRequest requests the coordinator for a group or transaction. +// +// This coordinator is different from the broker leader coordinator. This +// coordinator is the partition leader for the partition that is storing +// the group or transaction ID. +type FindCoordinatorRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // CoordinatorKey is the ID to use for finding the coordinator. For groups, + // this is the group name, for transactional producer, this is the + // transactional ID. + CoordinatorKey string // v0-v3 + + // CoordinatorType is the type that key is. Groups are type 0, + // transactional IDs are type 1. + CoordinatorType int8 // v1+ + + // CoordinatorKeys contains all keys to find the coordinator for. + CoordinatorKeys []string // v4+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v3+ +} + +func (*FindCoordinatorRequest) Key() int16 { return 10 } +func (*FindCoordinatorRequest) MaxVersion() int16 { return 4 } +func (v *FindCoordinatorRequest) SetVersion(version int16) { v.Version = version } +func (v *FindCoordinatorRequest) GetVersion() int16 { return v.Version } +func (v *FindCoordinatorRequest) IsFlexible() bool { return v.Version >= 3 } +func (v *FindCoordinatorRequest) ResponseKind() Response { + r := &FindCoordinatorResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *FindCoordinatorRequest) RequestWith(ctx context.Context, r Requestor) (*FindCoordinatorResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*FindCoordinatorResponse) + return resp, err +} + +func (v *FindCoordinatorRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + if version >= 0 && version <= 3 { + v := v.CoordinatorKey + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 1 { + v := v.CoordinatorType + dst = kbin.AppendInt8(dst, v) + } + if version >= 4 { + v := v.CoordinatorKeys + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *FindCoordinatorRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *FindCoordinatorRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *FindCoordinatorRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + if version >= 0 && version <= 3 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.CoordinatorKey = v + } + if version >= 1 { + v := b.Int8() + s.CoordinatorType = v + } + if version >= 4 { + v := s.CoordinatorKeys + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]string, l)...) + } + for i := int32(0); i < l; i++ { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + a[i] = v + } + v = a + s.CoordinatorKeys = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrFindCoordinatorRequest returns a pointer to a default FindCoordinatorRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. 
+func NewPtrFindCoordinatorRequest() *FindCoordinatorRequest { + var v FindCoordinatorRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FindCoordinatorRequest. +func (v *FindCoordinatorRequest) Default() { +} + +// NewFindCoordinatorRequest returns a default FindCoordinatorRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewFindCoordinatorRequest() FindCoordinatorRequest { + var v FindCoordinatorRequest + v.Default() + return v +} + +type FindCoordinatorResponseCoordinator struct { + Key string + + NodeID int32 + + Host string + + Port int32 + + ErrorCode int16 + + ErrorMessage *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FindCoordinatorResponseCoordinator. +func (v *FindCoordinatorResponseCoordinator) Default() { +} + +// NewFindCoordinatorResponseCoordinator returns a default FindCoordinatorResponseCoordinator +// This is a shortcut for creating a struct and calling Default yourself. +func NewFindCoordinatorResponseCoordinator() FindCoordinatorResponseCoordinator { + var v FindCoordinatorResponseCoordinator + v.Default() + return v +} + +// FindCoordinatorResponse is returned from a FindCoordinatorRequest. +type FindCoordinatorResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 2. + ThrottleMillis int32 // v1+ + + // ErrorCode is the error returned for the request. + // + // GROUP_AUTHORIZATION_FAILED is returned if for a group ID request and the + // client is not authorized to describe groups. + // + // TRANSACTIONAL_ID_AUTHORIZATION_FAILED is returned for a transactional ID + // request and the client is not authorized to describe transactional IDs. + // + // INVALID_REQUEST is returned if not asking for a known type (group, + // or transaction). + // + // COORDINATOR_NOT_AVAILABLE is returned if the coordinator is not available + // for the requested ID, which would be if the group or transactional topic + // does not exist or the partition the requested key maps to is not available. + ErrorCode int16 // v0-v3 + + // ErrorMessage is an informative message if the request errored. + ErrorMessage *string // v1-v3 + + // NodeID is the broker ID of the coordinator. + NodeID int32 // v0-v3 + + // Host is the host of the coordinator. + Host string // v0-v3 + + // Port is the port of the coordinator. + Port int32 // v0-v3 + + // Coordinators, introduced for KIP-699, is the bulk response for + // coordinators. The fields in the struct exactly match the original fields + // in the FindCoordinatorResponse, thus they are left undocumented. + Coordinators []FindCoordinatorResponseCoordinator // v4+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v3+ +} + +func (*FindCoordinatorResponse) Key() int16 { return 10 } +func (*FindCoordinatorResponse) MaxVersion() int16 { return 4 } +func (v *FindCoordinatorResponse) SetVersion(version int16) { v.Version = version } +func (v *FindCoordinatorResponse) GetVersion() int16 { return v.Version } +func (v *FindCoordinatorResponse) IsFlexible() bool { return v.Version >= 3 } +func (v *FindCoordinatorResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 2 } +func (v *FindCoordinatorResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *FindCoordinatorResponse) RequestKind() Request { + return &FindCoordinatorRequest{Version: v.Version} +} + +func (v *FindCoordinatorResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + if version >= 1 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + if version >= 0 && version <= 3 { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if version >= 1 && version <= 3 { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 0 && version <= 3 { + v := v.NodeID + dst = kbin.AppendInt32(dst, v) + } + if version >= 0 && version <= 3 { + v := v.Host + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 0 && version <= 3 { + v := v.Port + dst = kbin.AppendInt32(dst, v) + } + if version >= 4 { + v := v.Coordinators + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Key + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.NodeID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Host + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Port + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *FindCoordinatorResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *FindCoordinatorResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *FindCoordinatorResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + if version >= 1 { + v := b.Int32() + s.ThrottleMillis = v + } + if version >= 0 && version <= 3 { + v := b.Int16() + s.ErrorCode = v + } + if version >= 1 && version <= 3 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + if version >= 0 && version <= 3 { + v := b.Int32() + s.NodeID = v + } + 
if version >= 0 && version <= 3 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Host = v + } + if version >= 0 && version <= 3 { + v := b.Int32() + s.Port = v + } + if version >= 4 { + v := s.Coordinators + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]FindCoordinatorResponseCoordinator, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Key = v + } + { + v := b.Int32() + s.NodeID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Host = v + } + { + v := b.Int32() + s.Port = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Coordinators = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrFindCoordinatorResponse returns a pointer to a default FindCoordinatorResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrFindCoordinatorResponse() *FindCoordinatorResponse { + var v FindCoordinatorResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FindCoordinatorResponse. +func (v *FindCoordinatorResponse) Default() { +} + +// NewFindCoordinatorResponse returns a default FindCoordinatorResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewFindCoordinatorResponse() FindCoordinatorResponse { + var v FindCoordinatorResponse + v.Default() + return v +} + +type JoinGroupRequestProtocol struct { + // Name is a name of a protocol. This is arbitrary, but is used + // in the official client to agree on a partition balancing strategy. + // + // The official client uses range, roundrobin, or sticky (which was + // introduced in KIP-54). + Name string + + // Metadata is arbitrary information to pass along with this + // protocol name for this member. + // + // Note that while this is not documented in any protocol page, + // this is usually a serialized GroupMemberMetadata as described in + // https://cwiki.apache.org/confluence/display/KAFKA/Kafka+Client-side+Assignment+Proposal. + // + // The protocol metadata is where group members will communicate which + // topics they collectively as a group want to consume. + Metadata []byte + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to JoinGroupRequestProtocol. 
+func (v *JoinGroupRequestProtocol) Default() { +} + +// NewJoinGroupRequestProtocol returns a default JoinGroupRequestProtocol +// This is a shortcut for creating a struct and calling Default yourself. +func NewJoinGroupRequestProtocol() JoinGroupRequestProtocol { + var v JoinGroupRequestProtocol + v.Default() + return v +} + +// JoinGroupRequest issues a request to join a Kafka group. This will create a +// group if one does not exist. If joining an existing group, this may trigger +// a group rebalance. +// +// This will trigger a group rebalance if the request is from the group leader, +// or if the request is from a group member with different metadata, or if the +// request is with a new group member. +// +// Version 4 introduced replying to joins of existing groups with +// MEMBER_ID_REQUIRED, which requires re-issuing the join group with the +// returned member ID. See KIP-394 for more details. +// +// Version 5 introduced InstanceID, allowing for more "static" membership. +// See KIP-345 for more details. +type JoinGroupRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Group is the group to join. + Group string + + // SessionTimeoutMillis is how long a member in the group can go between + // heartbeats. If a member does not send a heartbeat within this timeout, + // the broker will remove the member from the group and initiate a rebalance. + SessionTimeoutMillis int32 + + // RebalanceTimeoutMillis is how long the broker waits for members to join a group + // once a rebalance begins. Kafka waits for the longest rebalance of all + // members in the group. Member sessions are still alive; heartbeats will be + // replied to with REBALANCE_IN_PROGRESS. Those members must transition to + // joining within this rebalance timeout. Members that do not rejoin within + // this timeout will be removed from the group. Members must commit offsets + // within this timeout. + // + // The first join for a new group has a 3 second grace period for other + // members to join; this grace period is extended until the RebalanceTimeoutMillis + // is up or until 3 seconds lapse with no new members. + // + // This field has a default of -1. + RebalanceTimeoutMillis int32 // v1+ + + // MemberID is the member ID to join the group with. When joining a group for + // the first time, use the empty string. The response will contain the member + // ID that should be used going forward. + MemberID string + + // InstanceID is a user configured ID that is used for making a group + // member "static", allowing many rebalances to be avoided. + InstanceID *string // v5+ + + // ProtocolType is the "type" of protocol being used for the join group. + // The initial group creation sets the type; all additional members must + // have the same type or they will be rejected. + // + // This is completely arbitrary, but the Java client and everything else + // uses "consumer" as the protocol type. + ProtocolType string + + // Protocols contains arbitrary information that group members use + // for rebalancing. All group members must agree on at least one protocol + // name. + Protocols []JoinGroupRequestProtocol + + // Reason is an optional reason the member is joining (or rejoining) the + // group (KIP-800, Kafka 3.2+). + Reason *string // v8+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v6+ +} + +func (*JoinGroupRequest) Key() int16 { return 11 } +func (*JoinGroupRequest) MaxVersion() int16 { return 9 } +func (v *JoinGroupRequest) SetVersion(version int16) { v.Version = version } +func (v *JoinGroupRequest) GetVersion() int16 { return v.Version } +func (v *JoinGroupRequest) IsFlexible() bool { return v.Version >= 6 } +func (v *JoinGroupRequest) IsGroupCoordinatorRequest() {} +func (v *JoinGroupRequest) ResponseKind() Response { + r := &JoinGroupResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *JoinGroupRequest) RequestWith(ctx context.Context, r Requestor) (*JoinGroupResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*JoinGroupResponse) + return resp, err +} + +func (v *JoinGroupRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.SessionTimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + if version >= 1 { + v := v.RebalanceTimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 5 { + v := v.InstanceID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.ProtocolType + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Protocols + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Metadata + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 8 { + v := v.Reason + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *JoinGroupRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *JoinGroupRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *JoinGroupRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Group = v + } + { + v := b.Int32() + s.SessionTimeoutMillis = v + } + if version >= 1 { + v := b.Int32() + s.RebalanceTimeoutMillis = v + } + { + var v string + if unsafe { + if isFlexible { + v = 
b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.MemberID = v + } + if version >= 5 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.InstanceID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ProtocolType = v + } + { + v := s.Protocols + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]JoinGroupRequestProtocol, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.Metadata = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Protocols = v + } + if version >= 8 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Reason = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrJoinGroupRequest returns a pointer to a default JoinGroupRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrJoinGroupRequest() *JoinGroupRequest { + var v JoinGroupRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to JoinGroupRequest. +func (v *JoinGroupRequest) Default() { + v.RebalanceTimeoutMillis = -1 +} + +// NewJoinGroupRequest returns a default JoinGroupRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewJoinGroupRequest() JoinGroupRequest { + var v JoinGroupRequest + v.Default() + return v +} + +type JoinGroupResponseMember struct { + // MemberID is a member in this group. + MemberID string + + // InstanceID is an instance ID of a member in this group (KIP-345). + InstanceID *string // v5+ + + // ProtocolMetadata is the metadata for this member for this protocol. + // This is usually of type GroupMemberMetadata. + ProtocolMetadata []byte + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to JoinGroupResponseMember. +func (v *JoinGroupResponseMember) Default() { +} + +// NewJoinGroupResponseMember returns a default JoinGroupResponseMember +// This is a shortcut for creating a struct and calling Default yourself. +func NewJoinGroupResponseMember() JoinGroupResponseMember { + var v JoinGroupResponseMember + v.Default() + return v +} + +// JoinGroupResponse is returned from a JoinGroupRequest. 
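+// A decoding note (not part of the upstream doc text): the response's Version
+// should be set to the originating request's version before ReadFrom is called,
+// since both which fields are present and whether the compact (flexible, v6+)
+// encodings are used depend on it.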
+type JoinGroupResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 3. + ThrottleMillis int32 // v2+ + + // ErrorCode is the error for the join group request. + // + // GROUP_AUTHORIZATION_FAILED is returned if the client is not authorized + // to the group (no read perms). + // + // INVALID_GROUP_ID is returned in the requested group ID is invalid. + // + // COORDINATOR_NOT_AVAILABLE is returned if the coordinator is not available + // (due to the requested broker shutting down or it has not completed startup). + // + // COORDINATOR_LOAD_IN_PROGRESS is returned if the group is loading. + // + // NOT_COORDINATOR is returned if the requested broker is not the coordinator + // for the requested group. + // + // INVALID_SESSION_TIMEOUT is returned if the requested SessionTimeout is + // not within the broker's group.{min,max}.session.timeout.ms. + // + // INCONSISTENT_GROUP_PROTOCOL is returned if the requested protocols are + // incompatible with the existing group member's protocols, or if the join + // was for a new group but contained no protocols. + // + // UNKNOWN_MEMBER_ID is returned is the requested group is dead (likely + // just migrated to another coordinator or the group is temporarily unstable), + // or if the request was for a new group but contained a non-empty member ID, + // or if the group does not have the requested member ID (and the client must + // do the new-join-group dance). + // + // MEMBER_ID_REQUIRED is returned on the initial join of an existing group. + // This error was proposed in KIP-394 and introduced in Kafka 2.2.0 to + // prevent flaky clients from continually triggering rebalances and prevent + // these clients from consuming RAM with metadata. If a client sees + // this error, it should re-issue the join with the MemberID in the response. + // Non-flaky clients will join with this new member ID, but flaky clients + // will not join quickly enough before the pending member ID is rotated out + // due to hitting the session.timeout.ms. + // + // GROUP_MAX_SIZE_REACHED is returned as of Kafka 2.2.0 if the group has + // reached a broker's group.max.size. + ErrorCode int16 + + // Generation is the current "generation" of this group. + // + // This field has a default of -1. + Generation int32 + + // ProtocolType is the "type" of protocol being used for this group. + ProtocolType *string // v7+ + + // Protocol is the agreed upon protocol name (i.e. "sticky", "range"). + // + // v7 of this response changed this field to be nullable. + Protocol *string + + // LeaderID is the leader member. + LeaderID string + + // True if the leader must skip running the assignment (KIP-814, Kafka 3.2+). + SkipAssignment bool // v9+ + + // MemberID is the member of the receiving client. + MemberID string + + // Members contains all other members of this group. Only the group leader + // receives the members. The leader is responsible for balancing subscribed + // topic partitions and replying appropriately in a SyncGroup request. + Members []JoinGroupResponseMember + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v6+ +} + +func (*JoinGroupResponse) Key() int16 { return 11 } +func (*JoinGroupResponse) MaxVersion() int16 { return 9 } +func (v *JoinGroupResponse) SetVersion(version int16) { v.Version = version } +func (v *JoinGroupResponse) GetVersion() int16 { return v.Version } +func (v *JoinGroupResponse) IsFlexible() bool { return v.Version >= 6 } +func (v *JoinGroupResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 3 } +func (v *JoinGroupResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *JoinGroupResponse) RequestKind() Request { return &JoinGroupRequest{Version: v.Version} } + +func (v *JoinGroupResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + if version >= 2 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Generation + dst = kbin.AppendInt32(dst, v) + } + if version >= 7 { + v := v.ProtocolType + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Protocol + if version < 7 { + var vv string + if v != nil { + vv = *v + } + { + v := vv + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } else { + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + } + { + v := v.LeaderID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 9 { + v := v.SkipAssignment + dst = kbin.AppendBool(dst, v) + } + { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Members + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 5 { + v := v.InstanceID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.ProtocolMetadata + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *JoinGroupResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *JoinGroupResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *JoinGroupResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + s := v + if version >= 2 { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int32() + s.Generation = v + } + if version >= 7 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = 
b.NullableString() + } + } + s.ProtocolType = v + } + { + var v *string + if version < 7 { + var vv string + if isFlexible { + if unsafe { + vv = b.UnsafeCompactString() + } else { + vv = b.CompactString() + } + } else { + if unsafe { + vv = b.UnsafeString() + } else { + vv = b.String() + } + } + v = &vv + } else { + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + } + s.Protocol = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.LeaderID = v + } + if version >= 9 { + v := b.Bool() + s.SkipAssignment = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.MemberID = v + } + { + v := s.Members + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]JoinGroupResponseMember, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.MemberID = v + } + if version >= 5 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.InstanceID = v + } + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.ProtocolMetadata = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Members = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrJoinGroupResponse returns a pointer to a default JoinGroupResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrJoinGroupResponse() *JoinGroupResponse { + var v JoinGroupResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to JoinGroupResponse. +func (v *JoinGroupResponse) Default() { + v.Generation = -1 +} + +// NewJoinGroupResponse returns a default JoinGroupResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewJoinGroupResponse() JoinGroupResponse { + var v JoinGroupResponse + v.Default() + return v +} + +// HeartbeatRequest issues a heartbeat for a member in a group, ensuring that +// Kafka does not expire the member from the group. +type HeartbeatRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Group is the group ID this heartbeat is for. + Group string + + // Generation is the group generation this heartbeat is for. + Generation int32 + + // MemberID is the member ID this member is for. + MemberID string + + // InstanceID is the instance ID of this member in the group (KIP-345). 
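+	// As a rough guide (KIP-345), a static member that restarts and rejoins with
+	// the same instance ID within the session timeout keeps its assignment
+	// without forcing a full group rebalance.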
+ InstanceID *string // v3+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +func (*HeartbeatRequest) Key() int16 { return 12 } +func (*HeartbeatRequest) MaxVersion() int16 { return 4 } +func (v *HeartbeatRequest) SetVersion(version int16) { v.Version = version } +func (v *HeartbeatRequest) GetVersion() int16 { return v.Version } +func (v *HeartbeatRequest) IsFlexible() bool { return v.Version >= 4 } +func (v *HeartbeatRequest) IsGroupCoordinatorRequest() {} +func (v *HeartbeatRequest) ResponseKind() Response { + r := &HeartbeatResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *HeartbeatRequest) RequestWith(ctx context.Context, r Requestor) (*HeartbeatResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*HeartbeatResponse) + return resp, err +} + +func (v *HeartbeatRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Generation + dst = kbin.AppendInt32(dst, v) + } + { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 3 { + v := v.InstanceID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *HeartbeatRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *HeartbeatRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *HeartbeatRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Group = v + } + { + v := b.Int32() + s.Generation = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.MemberID = v + } + if version >= 3 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.InstanceID = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrHeartbeatRequest returns a pointer to a default HeartbeatRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrHeartbeatRequest() *HeartbeatRequest { + var v HeartbeatRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to HeartbeatRequest. 
+func (v *HeartbeatRequest) Default() { +} + +// NewHeartbeatRequest returns a default HeartbeatRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewHeartbeatRequest() HeartbeatRequest { + var v HeartbeatRequest + v.Default() + return v +} + +// HeartbeatResponse is returned from a HeartbeatRequest. +type HeartbeatResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 2. + ThrottleMillis int32 // v1+ + + // ErrorCode is the error for the heartbeat request. + // + // GROUP_AUTHORIZATION_FAILED is returned if the client is not authorized + // to the group (no read perms). + // + // INVALID_GROUP_ID is returned in the requested group ID is invalid. + // + // COORDINATOR_NOT_AVAILABLE is returned if the coordinator is not available + // (due to the requested broker shutting down or it has not completed startup). + // + // NOT_COORDINATOR is returned if the requested broker is not the coordinator + // for the requested group. + // + // UNKNOWN_MEMBER_ID is returned if the member ID is not a part of the group, + // or if the group is empty or dead. + // + // ILLEGAL_GENERATION is returned if the request's generation ID is invalid. + // + // REBALANCE_IN_PROGRESS is returned if the group is currently rebalancing. + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +func (*HeartbeatResponse) Key() int16 { return 12 } +func (*HeartbeatResponse) MaxVersion() int16 { return 4 } +func (v *HeartbeatResponse) SetVersion(version int16) { v.Version = version } +func (v *HeartbeatResponse) GetVersion() int16 { return v.Version } +func (v *HeartbeatResponse) IsFlexible() bool { return v.Version >= 4 } +func (v *HeartbeatResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 2 } +func (v *HeartbeatResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *HeartbeatResponse) RequestKind() Request { return &HeartbeatRequest{Version: v.Version} } + +func (v *HeartbeatResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + if version >= 1 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *HeartbeatResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *HeartbeatResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *HeartbeatResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + s := v + if version >= 1 { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrHeartbeatResponse returns a pointer to a default HeartbeatResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. 
+func NewPtrHeartbeatResponse() *HeartbeatResponse { + var v HeartbeatResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to HeartbeatResponse. +func (v *HeartbeatResponse) Default() { +} + +// NewHeartbeatResponse returns a default HeartbeatResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewHeartbeatResponse() HeartbeatResponse { + var v HeartbeatResponse + v.Default() + return v +} + +type LeaveGroupRequestMember struct { + MemberID string + + InstanceID *string + + // Reason is an optional reason why this member is leaving the group + // (KIP-800, Kafka 3.2+). + Reason *string // v5+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to LeaveGroupRequestMember. +func (v *LeaveGroupRequestMember) Default() { +} + +// NewLeaveGroupRequestMember returns a default LeaveGroupRequestMember +// This is a shortcut for creating a struct and calling Default yourself. +func NewLeaveGroupRequestMember() LeaveGroupRequestMember { + var v LeaveGroupRequestMember + v.Default() + return v +} + +// LeaveGroupRequest issues a request for a group member to leave the group, +// triggering a group rebalance. +// +// Version 3 changed removed MemberID and added a batch instance+member ID +// way of leaving a group. +type LeaveGroupRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Group is the group to leave. + Group string + + // MemberID is the member that is leaving. + MemberID string // v0-v2 + + // Members are member and group instance IDs to cause to leave a group. + Members []LeaveGroupRequestMember // v3+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +func (*LeaveGroupRequest) Key() int16 { return 13 } +func (*LeaveGroupRequest) MaxVersion() int16 { return 5 } +func (v *LeaveGroupRequest) SetVersion(version int16) { v.Version = version } +func (v *LeaveGroupRequest) GetVersion() int16 { return v.Version } +func (v *LeaveGroupRequest) IsFlexible() bool { return v.Version >= 4 } +func (v *LeaveGroupRequest) IsGroupCoordinatorRequest() {} +func (v *LeaveGroupRequest) ResponseKind() Response { + r := &LeaveGroupResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
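+// Requestor is satisfied by a live broker client (for example franz-go's
+// *kgo.Client); code that only encodes and decodes frames can ignore this
+// helper and use AppendTo/ReadFrom directly.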
+func (v *LeaveGroupRequest) RequestWith(ctx context.Context, r Requestor) (*LeaveGroupResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*LeaveGroupResponse) + return resp, err +} + +func (v *LeaveGroupRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 0 && version <= 2 { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 3 { + v := v.Members + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.InstanceID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 5 { + v := v.Reason + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *LeaveGroupRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *LeaveGroupRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *LeaveGroupRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Group = v + } + if version >= 0 && version <= 2 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.MemberID = v + } + if version >= 3 { + v := s.Members + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]LeaveGroupRequestMember, l)...) 
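+	// Grow the reused slice to l members in one append so the loop below can
+	// decode each element in place instead of appending one element at a time.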
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.MemberID = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.InstanceID = v + } + if version >= 5 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Reason = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Members = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrLeaveGroupRequest returns a pointer to a default LeaveGroupRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrLeaveGroupRequest() *LeaveGroupRequest { + var v LeaveGroupRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to LeaveGroupRequest. +func (v *LeaveGroupRequest) Default() { +} + +// NewLeaveGroupRequest returns a default LeaveGroupRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewLeaveGroupRequest() LeaveGroupRequest { + var v LeaveGroupRequest + v.Default() + return v +} + +type LeaveGroupResponseMember struct { + MemberID string + + InstanceID *string + + // An individual member's leave error code. + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to LeaveGroupResponseMember. +func (v *LeaveGroupResponseMember) Default() { +} + +// NewLeaveGroupResponseMember returns a default LeaveGroupResponseMember +// This is a shortcut for creating a struct and calling Default yourself. +func NewLeaveGroupResponseMember() LeaveGroupResponseMember { + var v LeaveGroupResponseMember + v.Default() + return v +} + +// LeaveGroupResponse is returned from a LeaveGroupRequest. +type LeaveGroupResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 2. + ThrottleMillis int32 // v1+ + + // ErrorCode is the error for the leave group request. + // + // GROUP_AUTHORIZATION_FAILED is returned if the client is not authorized + // to the group (no read perms). + // + // INVALID_GROUP_ID is returned in the requested group ID is invalid. + // + // COORDINATOR_NOT_AVAILABLE is returned if the coordinator is not available + // (due to the requested broker shutting down or it has not completed startup). + // + // COORDINATOR_LOAD_IN_PROGRESS is returned if the group is loading. + // + // NOT_COORDINATOR is returned if the requested broker is not the coordinator + // for the requested group. 
+ // + // UNKNOWN_MEMBER_ID is returned if the member ID is not a part of the group, + // or if the group is empty or dead. + ErrorCode int16 + + // Members are the list of members and group instance IDs that left the group. + Members []LeaveGroupResponseMember // v3+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +func (*LeaveGroupResponse) Key() int16 { return 13 } +func (*LeaveGroupResponse) MaxVersion() int16 { return 5 } +func (v *LeaveGroupResponse) SetVersion(version int16) { v.Version = version } +func (v *LeaveGroupResponse) GetVersion() int16 { return v.Version } +func (v *LeaveGroupResponse) IsFlexible() bool { return v.Version >= 4 } +func (v *LeaveGroupResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 2 } +func (v *LeaveGroupResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *LeaveGroupResponse) RequestKind() Request { return &LeaveGroupRequest{Version: v.Version} } + +func (v *LeaveGroupResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + if version >= 1 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if version >= 3 { + v := v.Members + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.InstanceID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *LeaveGroupResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *LeaveGroupResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *LeaveGroupResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + s := v + if version >= 1 { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if version >= 3 { + v := s.Members + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]LeaveGroupResponseMember, l)...) 
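+	// Same sizing trick as the request decoder: the slice is grown once and the
+	// loop that follows fills in MemberID, InstanceID, the per-member ErrorCode
+	// and, for flexible versions (v4+), any trailing tagged fields.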
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.MemberID = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.InstanceID = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Members = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrLeaveGroupResponse returns a pointer to a default LeaveGroupResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrLeaveGroupResponse() *LeaveGroupResponse { + var v LeaveGroupResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to LeaveGroupResponse. +func (v *LeaveGroupResponse) Default() { +} + +// NewLeaveGroupResponse returns a default LeaveGroupResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewLeaveGroupResponse() LeaveGroupResponse { + var v LeaveGroupResponse + v.Default() + return v +} + +type SyncGroupRequestGroupAssignment struct { + // MemberID is the member this assignment is for. + MemberID string + + // MemberAssignment is the assignment for this member. This is typically + // of type GroupMemberAssignment. + MemberAssignment []byte + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to SyncGroupRequestGroupAssignment. +func (v *SyncGroupRequestGroupAssignment) Default() { +} + +// NewSyncGroupRequestGroupAssignment returns a default SyncGroupRequestGroupAssignment +// This is a shortcut for creating a struct and calling Default yourself. +func NewSyncGroupRequestGroupAssignment() SyncGroupRequestGroupAssignment { + var v SyncGroupRequestGroupAssignment + v.Default() + return v +} + +// SyncGroupRequest is issued by all group members after they receive a a +// response for JoinGroup. The group leader is responsible for sending member +// assignments with the request; all other members do not. +// +// Once the leader sends the group assignment, all members will be replied to. +type SyncGroupRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Group is the group ID this sync group is for. + Group string + + // Generation is the group generation this sync is for. + Generation int32 + + // MemberID is the member ID this member is. + MemberID string + + // InstanceID is the instance ID of this member in the group (KIP-345). + InstanceID *string // v3+ + + // ProtocolType is the "type" of protocol being used for this group. + ProtocolType *string // v5+ + + // Protocol is the agreed upon protocol name (i.e. "sticky", "range"). + Protocol *string // v5+ + + // GroupAssignment, sent only from the group leader, is the topic partition + // assignment it has decided on for all members. 
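+	// Followers send an empty assignment here and receive their own assignment
+	// back in the SyncGroup response once the leader's request has been processed.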
+ GroupAssignment []SyncGroupRequestGroupAssignment + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +func (*SyncGroupRequest) Key() int16 { return 14 } +func (*SyncGroupRequest) MaxVersion() int16 { return 5 } +func (v *SyncGroupRequest) SetVersion(version int16) { v.Version = version } +func (v *SyncGroupRequest) GetVersion() int16 { return v.Version } +func (v *SyncGroupRequest) IsFlexible() bool { return v.Version >= 4 } +func (v *SyncGroupRequest) IsGroupCoordinatorRequest() {} +func (v *SyncGroupRequest) ResponseKind() Response { + r := &SyncGroupResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *SyncGroupRequest) RequestWith(ctx context.Context, r Requestor) (*SyncGroupResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*SyncGroupResponse) + return resp, err +} + +func (v *SyncGroupRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Generation + dst = kbin.AppendInt32(dst, v) + } + { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 3 { + v := v.InstanceID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 5 { + v := v.ProtocolType + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 5 { + v := v.Protocol + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.GroupAssignment + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.MemberAssignment + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *SyncGroupRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *SyncGroupRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *SyncGroupRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Group = v + } + { + v := b.Int32() + s.Generation = v + } + { + var v string + if unsafe { + if isFlexible { + v = 
b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.MemberID = v + } + if version >= 3 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.InstanceID = v + } + if version >= 5 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ProtocolType = v + } + if version >= 5 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Protocol = v + } + { + v := s.GroupAssignment + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]SyncGroupRequestGroupAssignment, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.MemberID = v + } + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.MemberAssignment = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.GroupAssignment = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrSyncGroupRequest returns a pointer to a default SyncGroupRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrSyncGroupRequest() *SyncGroupRequest { + var v SyncGroupRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to SyncGroupRequest. +func (v *SyncGroupRequest) Default() { +} + +// NewSyncGroupRequest returns a default SyncGroupRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewSyncGroupRequest() SyncGroupRequest { + var v SyncGroupRequest + v.Default() + return v +} + +// SyncGroupResponse is returned from a SyncGroupRequest. +type SyncGroupResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 2. + ThrottleMillis int32 // v1+ + + // ErrorCode is the error for the sync group request. + // + // GROUP_AUTHORIZATION_FAILED is returned if the client is not authorized + // to the group (no read perms). + // + // INVALID_GROUP_ID is returned in the requested group ID is invalid. + // + // COORDINATOR_NOT_AVAILABLE is returned if the coordinator is not available. + // + // NOT_COORDINATOR is returned if the requested broker is not the coordinator + // for the requested group. 
+ // + // UNKNOWN_MEMBER_ID is returned if the member ID is not a part of the group, + // or if the group is empty or dead. + // + // ILLEGAL_GENERATION is returned if the request's generation ID is invalid. + // + // REBALANCE_IN_PROGRESS is returned if the group switched back to rebalancing. + // + // UNKNOWN_SERVER_ERROR is returned if the store of the group assignment + // resulted in a too large message. + ErrorCode int16 + + // ProtocolType is the "type" of protocol being used for this group. + ProtocolType *string // v5+ + + // Protocol is the agreed upon protocol name (i.e. "sticky", "range"). + Protocol *string // v5+ + + // MemberAssignment is the assignment for this member that the leader + // determined. + MemberAssignment []byte + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +func (*SyncGroupResponse) Key() int16 { return 14 } +func (*SyncGroupResponse) MaxVersion() int16 { return 5 } +func (v *SyncGroupResponse) SetVersion(version int16) { v.Version = version } +func (v *SyncGroupResponse) GetVersion() int16 { return v.Version } +func (v *SyncGroupResponse) IsFlexible() bool { return v.Version >= 4 } +func (v *SyncGroupResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 2 } +func (v *SyncGroupResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *SyncGroupResponse) RequestKind() Request { return &SyncGroupRequest{Version: v.Version} } + +func (v *SyncGroupResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + if version >= 1 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if version >= 5 { + v := v.ProtocolType + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 5 { + v := v.Protocol + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.MemberAssignment + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *SyncGroupResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *SyncGroupResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *SyncGroupResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + s := v + if version >= 1 { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if version >= 5 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ProtocolType = v + } + if version >= 5 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Protocol = v + } + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + 
s.MemberAssignment = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrSyncGroupResponse returns a pointer to a default SyncGroupResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrSyncGroupResponse() *SyncGroupResponse { + var v SyncGroupResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to SyncGroupResponse. +func (v *SyncGroupResponse) Default() { +} + +// NewSyncGroupResponse returns a default SyncGroupResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewSyncGroupResponse() SyncGroupResponse { + var v SyncGroupResponse + v.Default() + return v +} + +// DescribeGroupsRequest requests metadata for group IDs. +type DescribeGroupsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Groups is an array of group IDs to request metadata for. + Groups []string + + // IncludeAuthorizedOperations, introduced in Kafka 2.3.0, specifies + // whether to include a bitfield of AclOperations this client can perform + // on the groups. See KIP-430 for more details. + IncludeAuthorizedOperations bool // v3+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v5+ +} + +func (*DescribeGroupsRequest) Key() int16 { return 15 } +func (*DescribeGroupsRequest) MaxVersion() int16 { return 5 } +func (v *DescribeGroupsRequest) SetVersion(version int16) { v.Version = version } +func (v *DescribeGroupsRequest) GetVersion() int16 { return v.Version } +func (v *DescribeGroupsRequest) IsFlexible() bool { return v.Version >= 5 } +func (v *DescribeGroupsRequest) IsGroupCoordinatorRequest() {} +func (v *DescribeGroupsRequest) ResponseKind() Response { + r := &DescribeGroupsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
+func (v *DescribeGroupsRequest) RequestWith(ctx context.Context, r Requestor) (*DescribeGroupsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DescribeGroupsResponse) + return resp, err +} + +func (v *DescribeGroupsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 5 + _ = isFlexible + { + v := v.Groups + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } + if version >= 3 { + v := v.IncludeAuthorizedOperations + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeGroupsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeGroupsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeGroupsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 5 + _ = isFlexible + s := v + { + v := s.Groups + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]string, l)...) + } + for i := int32(0); i < l; i++ { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + a[i] = v + } + v = a + s.Groups = v + } + if version >= 3 { + v := b.Bool() + s.IncludeAuthorizedOperations = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeGroupsRequest returns a pointer to a default DescribeGroupsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeGroupsRequest() *DescribeGroupsRequest { + var v DescribeGroupsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeGroupsRequest. +func (v *DescribeGroupsRequest) Default() { +} + +// NewDescribeGroupsRequest returns a default DescribeGroupsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeGroupsRequest() DescribeGroupsRequest { + var v DescribeGroupsRequest + v.Default() + return v +} + +type DescribeGroupsResponseGroupMember struct { + // MemberID is the member ID of a member in this group. + MemberID string + + // InstanceID is the instance ID of this member in the group (KIP-345). + InstanceID *string // v4+ + + // ClientID is the client ID used by this member. + ClientID string + + // ClientHost is the host this client is running on. + ClientHost string + + // ProtocolMetadata is the metadata this member included when joining + // the group. If using normal (Java-like) consumers, this will be of + // type GroupMemberMetadata. + ProtocolMetadata []byte + + // MemberAssignment is the assignment for this member in the group. + // If using normal (Java-like) consumers, this will be of type + // GroupMemberAssignment. + MemberAssignment []byte + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v5+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeGroupsResponseGroupMember. +func (v *DescribeGroupsResponseGroupMember) Default() { +} + +// NewDescribeGroupsResponseGroupMember returns a default DescribeGroupsResponseGroupMember +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeGroupsResponseGroupMember() DescribeGroupsResponseGroupMember { + var v DescribeGroupsResponseGroupMember + v.Default() + return v +} + +type DescribeGroupsResponseGroup struct { + // ErrorCode is the error code for an individual group in a request. + // + // GROUP_AUTHORIZATION_FAILED is returned if the client is not authorized + // to describe a group. + // + // INVALID_GROUP_ID is returned if the requested group ID is invalid. + // + // COORDINATOR_NOT_AVAILABLE is returned if the coordinator for this + // group is not yet active. + // + // COORDINATOR_LOAD_IN_PROGRESS is returned if the group is loading. + // + // NOT_COORDINATOR is returned if the requested broker is not the + // coordinator for this group. + ErrorCode int16 + + // Group is the id of this group. + Group string + + // State is the state this group is in. + State string + + // ProtocolType is the "type" of protocol being used for this group. + ProtocolType string + + // Protocol is the agreed upon protocol for all members in this group. + Protocol string + + // Members contains members in this group. + Members []DescribeGroupsResponseGroupMember + + // AuthorizedOperations is a bitfield containing which operations the + // the client is allowed to perform on this group. + // This is only returned if requested. + // + // This field has a default of -2147483648. + AuthorizedOperations int32 // v3+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v5+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeGroupsResponseGroup. +func (v *DescribeGroupsResponseGroup) Default() { + v.AuthorizedOperations = -2147483648 +} + +// NewDescribeGroupsResponseGroup returns a default DescribeGroupsResponseGroup +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeGroupsResponseGroup() DescribeGroupsResponseGroup { + var v DescribeGroupsResponseGroup + v.Default() + return v +} + +// DescribeGroupsResponse is returned from a DescribeGroupsRequest. +type DescribeGroupsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 2. + ThrottleMillis int32 // v1+ + + // Groups is an array of group metadata. + Groups []DescribeGroupsResponseGroup + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v5+ +} + +func (*DescribeGroupsResponse) Key() int16 { return 15 } +func (*DescribeGroupsResponse) MaxVersion() int16 { return 5 } +func (v *DescribeGroupsResponse) SetVersion(version int16) { v.Version = version } +func (v *DescribeGroupsResponse) GetVersion() int16 { return v.Version } +func (v *DescribeGroupsResponse) IsFlexible() bool { return v.Version >= 5 } +func (v *DescribeGroupsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 2 } +func (v *DescribeGroupsResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *DescribeGroupsResponse) RequestKind() Request { + return &DescribeGroupsRequest{Version: v.Version} +} + +func (v *DescribeGroupsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 5 + _ = isFlexible + if version >= 1 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Groups + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.State + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ProtocolType + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Protocol + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Members + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 4 { + v := v.InstanceID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.ClientID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ClientHost + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ProtocolMetadata + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + { + v := v.MemberAssignment + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 3 { + v := v.AuthorizedOperations + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeGroupsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeGroupsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeGroupsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} 
+ version := v.Version + _ = version + isFlexible := version >= 5 + _ = isFlexible + s := v + if version >= 1 { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Groups + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeGroupsResponseGroup, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Group = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.State = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ProtocolType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Protocol = v + } + { + v := s.Members + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeGroupsResponseGroupMember, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.MemberID = v + } + if version >= 4 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.InstanceID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ClientID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ClientHost = v + } + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.ProtocolMetadata = v + } + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.MemberAssignment = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Members = v + } + if version >= 3 { + v := b.Int32() + s.AuthorizedOperations = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Groups = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeGroupsResponse returns a pointer to a default DescribeGroupsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. 
+func NewPtrDescribeGroupsResponse() *DescribeGroupsResponse { + var v DescribeGroupsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeGroupsResponse. +func (v *DescribeGroupsResponse) Default() { +} + +// NewDescribeGroupsResponse returns a default DescribeGroupsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeGroupsResponse() DescribeGroupsResponse { + var v DescribeGroupsResponse + v.Default() + return v +} + +// ListGroupsRequest issues a request to list all groups. +// +// To list all groups in a cluster, this must be issued to every broker. +type ListGroupsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // StatesFilter, proposed in KIP-518 and introduced in Kafka 2.6.0, + // allows filtering groups by state, where a state is any of + // "Preparing", "PreparingRebalance", "CompletingRebalance", "Stable", + // "Dead", or "Empty". If empty, all groups are returned. + StatesFilter []string // v4+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +func (*ListGroupsRequest) Key() int16 { return 16 } +func (*ListGroupsRequest) MaxVersion() int16 { return 4 } +func (v *ListGroupsRequest) SetVersion(version int16) { v.Version = version } +func (v *ListGroupsRequest) GetVersion() int16 { return v.Version } +func (v *ListGroupsRequest) IsFlexible() bool { return v.Version >= 3 } +func (v *ListGroupsRequest) ResponseKind() Response { + r := &ListGroupsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *ListGroupsRequest) RequestWith(ctx context.Context, r Requestor) (*ListGroupsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*ListGroupsResponse) + return resp, err +} + +func (v *ListGroupsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + if version >= 4 { + v := v.StatesFilter + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ListGroupsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ListGroupsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ListGroupsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + if version >= 4 { + v := s.StatesFilter + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]string, l)...) 
+ } + for i := int32(0); i < l; i++ { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + a[i] = v + } + v = a + s.StatesFilter = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrListGroupsRequest returns a pointer to a default ListGroupsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrListGroupsRequest() *ListGroupsRequest { + var v ListGroupsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListGroupsRequest. +func (v *ListGroupsRequest) Default() { +} + +// NewListGroupsRequest returns a default ListGroupsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewListGroupsRequest() ListGroupsRequest { + var v ListGroupsRequest + v.Default() + return v +} + +type ListGroupsResponseGroup struct { + // Group is a Kafka group. + Group string + + // ProtocolType is the protocol type in use by the group. + ProtocolType string + + // The group state. + GroupState string // v4+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListGroupsResponseGroup. +func (v *ListGroupsResponseGroup) Default() { +} + +// NewListGroupsResponseGroup returns a default ListGroupsResponseGroup +// This is a shortcut for creating a struct and calling Default yourself. +func NewListGroupsResponseGroup() ListGroupsResponseGroup { + var v ListGroupsResponseGroup + v.Default() + return v +} + +// ListGroupsResponse is returned from a ListGroupsRequest. +type ListGroupsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 2. + ThrottleMillis int32 // v1+ + + // ErrorCode is the error returned for the list groups request. + // + // COORDINATOR_NOT_AVAILABLE is returned if the coordinator is not yet active. + // + // COORDINATOR_LOAD_IN_PROGRESS is returned if the group manager is loading. + ErrorCode int16 + + // Groups is the list of groups Kafka knows of. + Groups []ListGroupsResponseGroup + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v3+ +} + +func (*ListGroupsResponse) Key() int16 { return 16 } +func (*ListGroupsResponse) MaxVersion() int16 { return 4 } +func (v *ListGroupsResponse) SetVersion(version int16) { v.Version = version } +func (v *ListGroupsResponse) GetVersion() int16 { return v.Version } +func (v *ListGroupsResponse) IsFlexible() bool { return v.Version >= 3 } +func (v *ListGroupsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 2 } +func (v *ListGroupsResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *ListGroupsResponse) RequestKind() Request { return &ListGroupsRequest{Version: v.Version} } + +func (v *ListGroupsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + if version >= 1 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Groups + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ProtocolType + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 4 { + v := v.GroupState + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ListGroupsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ListGroupsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ListGroupsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + if version >= 1 { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := s.Groups + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ListGroupsResponseGroup, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Group = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ProtocolType = v + } + if version >= 4 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.GroupState = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Groups = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrListGroupsResponse returns a pointer to a default ListGroupsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrListGroupsResponse() *ListGroupsResponse { + var v ListGroupsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListGroupsResponse. +func (v *ListGroupsResponse) Default() { +} + +// NewListGroupsResponse returns a default ListGroupsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewListGroupsResponse() ListGroupsResponse { + var v ListGroupsResponse + v.Default() + return v +} + +// SASLHandshakeRequest begins the sasl authentication flow. Note that Kerberos +// GSSAPI authentication has its own unique flow. +type SASLHandshakeRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Mechanism is the mechanism to use for the sasl handshake (e.g., "PLAIN"). + // + // For version 0, if this mechanism is supported, it is expected that the + // client immediately authenticates using this mechanism. Note that the + // only mechanism exclusive to v0 is PLAIN. + // + // For version 1, if the mechanism is supported, the next request to issue + // is SASLHandshakeRequest. + Mechanism string +} + +func (*SASLHandshakeRequest) Key() int16 { return 17 } +func (*SASLHandshakeRequest) MaxVersion() int16 { return 1 } +func (v *SASLHandshakeRequest) SetVersion(version int16) { v.Version = version } +func (v *SASLHandshakeRequest) GetVersion() int16 { return v.Version } +func (v *SASLHandshakeRequest) IsFlexible() bool { return false } +func (v *SASLHandshakeRequest) ResponseKind() Response { + r := &SASLHandshakeResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
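+//
+// As a rough caller-side sketch (the Requestor "client", the context, and the
+// "PLAIN" mechanism below are illustrative assumptions, not requirements of this
+// package):
+//
+//	req := kmsg.NewPtrSASLHandshakeRequest()
+//	req.Mechanism = "PLAIN"
+//	resp, err := req.RequestWith(ctx, client)
+//	if err == nil && resp.ErrorCode != 0 {
+//		// handshake rejected; resp.SupportedMechanisms lists what the broker accepts
+//	}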
+func (v *SASLHandshakeRequest) RequestWith(ctx context.Context, r Requestor) (*SASLHandshakeResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*SASLHandshakeResponse) + return resp, err +} + +func (v *SASLHandshakeRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.Mechanism + dst = kbin.AppendString(dst, v) + } + return dst +} + +func (v *SASLHandshakeRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *SASLHandshakeRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *SASLHandshakeRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Mechanism = v + } + return b.Complete() +} + +// NewPtrSASLHandshakeRequest returns a pointer to a default SASLHandshakeRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrSASLHandshakeRequest() *SASLHandshakeRequest { + var v SASLHandshakeRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to SASLHandshakeRequest. +func (v *SASLHandshakeRequest) Default() { +} + +// NewSASLHandshakeRequest returns a default SASLHandshakeRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewSASLHandshakeRequest() SASLHandshakeRequest { + var v SASLHandshakeRequest + v.Default() + return v +} + +// SASLHandshakeResponse is returned for a SASLHandshakeRequest. +type SASLHandshakeResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ErrorCode is non-zero for ILLEGAL_SASL_STATE, meaning a sasl handshake + // is not expected at this point in the connection, or UNSUPPORTED_SASL_MECHANISM, + // meaning the requested mechanism is not supported. + ErrorCode int16 + + // SupportedMechanisms is the list of mechanisms supported if this request + // errored. + SupportedMechanisms []string +} + +func (*SASLHandshakeResponse) Key() int16 { return 17 } +func (*SASLHandshakeResponse) MaxVersion() int16 { return 1 } +func (v *SASLHandshakeResponse) SetVersion(version int16) { v.Version = version } +func (v *SASLHandshakeResponse) GetVersion() int16 { return v.Version } +func (v *SASLHandshakeResponse) IsFlexible() bool { return false } +func (v *SASLHandshakeResponse) RequestKind() Request { + return &SASLHandshakeRequest{Version: v.Version} +} + +func (v *SASLHandshakeResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.SupportedMechanisms + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := v[i] + dst = kbin.AppendString(dst, v) + } + } + return dst +} + +func (v *SASLHandshakeResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *SASLHandshakeResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *SASLHandshakeResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + v := s.SupportedMechanisms + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]string, l)...) 
+ } + for i := int32(0); i < l; i++ { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + a[i] = v + } + v = a + s.SupportedMechanisms = v + } + return b.Complete() +} + +// NewPtrSASLHandshakeResponse returns a pointer to a default SASLHandshakeResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrSASLHandshakeResponse() *SASLHandshakeResponse { + var v SASLHandshakeResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to SASLHandshakeResponse. +func (v *SASLHandshakeResponse) Default() { +} + +// NewSASLHandshakeResponse returns a default SASLHandshakeResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewSASLHandshakeResponse() SASLHandshakeResponse { + var v SASLHandshakeResponse + v.Default() + return v +} + +// ApiVersionsRequest requests what API versions a Kafka broker supports. +// +// Note that the client does not know the version a broker supports before +// sending this request. +// +// Before Kafka 2.4.0, if the client used a version larger than the broker +// understands, the broker would reply with an UNSUPPORTED_VERSION error using +// the version 0 message format (i.e., 6 bytes long!). The client should retry +// with a lower version. +// +// After Kafka 2.4.0, if the client uses a version larger than the broker +// understands, the broker replies with UNSUPPORTED_VERSIONS using the version +// 0 message format but additionally includes the api versions the broker does +// support. +type ApiVersionsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ClientSoftwareName, added for KIP-511 with Kafka 2.4.0, is the name of the + // client issuing this request. The broker can use this to enrich its own + // debugging information of which version of what clients are connected. + // + // If using v3, this field is required and must match the following pattern: + // + // [a-zA-Z0-9](?:[a-zA-Z0-9\\-.]*[a-zA-Z0-9])? + ClientSoftwareName string // v3+ + + // ClientSoftwareVersion is the version of the software name in the prior + // field. It must match the same regex (thus, this is also required). + ClientSoftwareVersion string // v3+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +func (*ApiVersionsRequest) Key() int16 { return 18 } +func (*ApiVersionsRequest) MaxVersion() int16 { return 3 } +func (v *ApiVersionsRequest) SetVersion(version int16) { v.Version = version } +func (v *ApiVersionsRequest) GetVersion() int16 { return v.Version } +func (v *ApiVersionsRequest) IsFlexible() bool { return v.Version >= 3 } +func (v *ApiVersionsRequest) ResponseKind() Response { + r := &ApiVersionsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
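+//
+// A minimal caller-side sketch (the software name and version strings are
+// placeholder values; "client" is any Requestor, e.g. a franz-go client):
+//
+//	req := kmsg.NewPtrApiVersionsRequest()
+//	req.ClientSoftwareName = "example-client" // v3+ only, must match the documented pattern
+//	req.ClientSoftwareVersion = "0.1.0"       // v3+ only
+//	resp, err := req.RequestWith(ctx, client)
+//	// on success, resp.ApiKeys holds the broker's supported version range per API key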
+func (v *ApiVersionsRequest) RequestWith(ctx context.Context, r Requestor) (*ApiVersionsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*ApiVersionsResponse) + return resp, err +} + +func (v *ApiVersionsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + if version >= 3 { + v := v.ClientSoftwareName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 3 { + v := v.ClientSoftwareVersion + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ApiVersionsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ApiVersionsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ApiVersionsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + if version >= 3 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ClientSoftwareName = v + } + if version >= 3 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ClientSoftwareVersion = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrApiVersionsRequest returns a pointer to a default ApiVersionsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrApiVersionsRequest() *ApiVersionsRequest { + var v ApiVersionsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ApiVersionsRequest. +func (v *ApiVersionsRequest) Default() { +} + +// NewApiVersionsRequest returns a default ApiVersionsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewApiVersionsRequest() ApiVersionsRequest { + var v ApiVersionsRequest + v.Default() + return v +} + +type ApiVersionsResponseApiKey struct { + // ApiKey is the key of a message request. + ApiKey int16 + + // MinVersion is the min version a broker supports for an API key. + MinVersion int16 + + // MaxVersion is the max version a broker supports for an API key. + MaxVersion int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ApiVersionsResponseApiKey. +func (v *ApiVersionsResponseApiKey) Default() { +} + +// NewApiVersionsResponseApiKey returns a default ApiVersionsResponseApiKey +// This is a shortcut for creating a struct and calling Default yourself. +func NewApiVersionsResponseApiKey() ApiVersionsResponseApiKey { + var v ApiVersionsResponseApiKey + v.Default() + return v +} + +type ApiVersionsResponseSupportedFeature struct { + // The name of the feature. + Name string + + // The minimum supported version for the feature. 
+ MinVersion int16 + + // The maximum supported version for the feature. + MaxVersion int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ApiVersionsResponseSupportedFeature. +func (v *ApiVersionsResponseSupportedFeature) Default() { +} + +// NewApiVersionsResponseSupportedFeature returns a default ApiVersionsResponseSupportedFeature +// This is a shortcut for creating a struct and calling Default yourself. +func NewApiVersionsResponseSupportedFeature() ApiVersionsResponseSupportedFeature { + var v ApiVersionsResponseSupportedFeature + v.Default() + return v +} + +type ApiVersionsResponseFinalizedFeature struct { + // The name of the feature. + Name string + + // The cluster-wide finalized max version level for the feature. + MaxVersionLevel int16 + + // The cluster-wide finalized min version level for the feature. + MinVersionLevel int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ApiVersionsResponseFinalizedFeature. +func (v *ApiVersionsResponseFinalizedFeature) Default() { +} + +// NewApiVersionsResponseFinalizedFeature returns a default ApiVersionsResponseFinalizedFeature +// This is a shortcut for creating a struct and calling Default yourself. +func NewApiVersionsResponseFinalizedFeature() ApiVersionsResponseFinalizedFeature { + var v ApiVersionsResponseFinalizedFeature + v.Default() + return v +} + +// ApiVersionsResponse is returned from an ApiVersionsRequest. +type ApiVersionsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ErrorCode is UNSUPPORTED_VERSION if the request was issued with a higher + // version than the broker supports. Before Kafka 2.4.0, if this error is + // returned, the rest of this struct will be empty. + // + // Starting in Kafka 2.4.0 (with version 3), even with an UNSUPPORTED_VERSION + // error, the broker still replies with the ApiKeys it supports. + ErrorCode int16 + + // ApiKeys is an array corresponding to API keys the broker supports + // and the range of supported versions for each key. + ApiKeys []ApiVersionsResponseApiKey + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 2. + ThrottleMillis int32 // v1+ + + // Features supported by the broker (see KIP-584). + SupportedFeatures []ApiVersionsResponseSupportedFeature // tag 0 + + // The monotonically increasing epoch for the finalized features information, + // where -1 indicates an unknown epoch. + // + // This field has a default of -1. + FinalizedFeaturesEpoch int64 // tag 1 + + // The list of cluster-wide finalized features (only valid if + // FinalizedFeaturesEpoch is >= 0). + FinalizedFeatures []ApiVersionsResponseFinalizedFeature // tag 2 + + // Set by a KRaft controller if the required configurations for ZK migration + // are present + ZkMigrationReady bool // tag 3 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v3+ +} + +func (*ApiVersionsResponse) Key() int16 { return 18 } +func (*ApiVersionsResponse) MaxVersion() int16 { return 3 } +func (v *ApiVersionsResponse) SetVersion(version int16) { v.Version = version } +func (v *ApiVersionsResponse) GetVersion() int16 { return v.Version } +func (v *ApiVersionsResponse) IsFlexible() bool { return v.Version >= 3 } +func (v *ApiVersionsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 2 } +func (v *ApiVersionsResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *ApiVersionsResponse) RequestKind() Request { return &ApiVersionsRequest{Version: v.Version} } + +func (v *ApiVersionsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ApiKeys + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ApiKey + dst = kbin.AppendInt16(dst, v) + } + { + v := v.MinVersion + dst = kbin.AppendInt16(dst, v) + } + { + v := v.MaxVersion + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 1 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + var toEncode []uint32 + if len(v.SupportedFeatures) > 0 { + toEncode = append(toEncode, 0) + } + if v.FinalizedFeaturesEpoch != -1 { + toEncode = append(toEncode, 1) + } + if len(v.FinalizedFeatures) > 0 { + toEncode = append(toEncode, 2) + } + if v.ZkMigrationReady != false { + toEncode = append(toEncode, 3) + } + dst = kbin.AppendUvarint(dst, uint32(len(toEncode)+v.UnknownTags.Len())) + for _, tag := range toEncode { + switch tag { + case 0: + { + v := v.SupportedFeatures + dst = kbin.AppendUvarint(dst, 0) + sized := false + lenAt := len(dst) + fSupportedFeatures: + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.MinVersion + dst = kbin.AppendInt16(dst, v) + } + { + v := v.MaxVersion + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + if !sized { + dst = kbin.AppendUvarint(dst[:lenAt], uint32(len(dst[lenAt:]))) + sized = true + goto fSupportedFeatures + } + } + case 1: + { + v := v.FinalizedFeaturesEpoch + dst = kbin.AppendUvarint(dst, 1) + dst = kbin.AppendUvarint(dst, 8) + dst = kbin.AppendInt64(dst, v) + } + case 2: + { + v := v.FinalizedFeatures + dst = kbin.AppendUvarint(dst, 2) + sized := false + lenAt := len(dst) + fFinalizedFeatures: + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.MaxVersionLevel + dst = kbin.AppendInt16(dst, v) + } + { + v := v.MinVersionLevel + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + if 
!sized { + dst = kbin.AppendUvarint(dst[:lenAt], uint32(len(dst[lenAt:]))) + sized = true + goto fFinalizedFeatures + } + } + case 3: + { + v := v.ZkMigrationReady + dst = kbin.AppendUvarint(dst, 3) + dst = kbin.AppendUvarint(dst, 1) + dst = kbin.AppendBool(dst, v) + } + } + } + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ApiVersionsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ApiVersionsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ApiVersionsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + v := s.ApiKeys + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ApiVersionsResponseApiKey, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int16() + s.ApiKey = v + } + { + v := b.Int16() + s.MinVersion = v + } + { + v := b.Int16() + s.MaxVersion = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.ApiKeys = v + } + if version >= 1 { + v := b.Int32() + s.ThrottleMillis = v + } + if isFlexible { + for i := b.Uvarint(); i > 0; i-- { + switch key := b.Uvarint(); key { + default: + s.UnknownTags.Set(key, b.Span(int(b.Uvarint()))) + case 0: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + v := s.SupportedFeatures + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ApiVersionsResponseSupportedFeature, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + { + v := b.Int16() + s.MinVersion = v + } + { + v := b.Int16() + s.MaxVersion = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.SupportedFeatures = v + if err := b.Complete(); err != nil { + return err + } + case 1: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + v := b.Int64() + s.FinalizedFeaturesEpoch = v + if err := b.Complete(); err != nil { + return err + } + case 2: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + v := s.FinalizedFeatures + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ApiVersionsResponseFinalizedFeature, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + { + v := b.Int16() + s.MaxVersionLevel = v + } + { + v := b.Int16() + s.MinVersionLevel = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.FinalizedFeatures = v + if err := b.Complete(); err != nil { + return err + } + case 3: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + v := b.Bool() + s.ZkMigrationReady = v + if err := b.Complete(); err != nil { + return err + } + } + } + } + return b.Complete() +} + +// NewPtrApiVersionsResponse returns a pointer to a default ApiVersionsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrApiVersionsResponse() *ApiVersionsResponse { + var v ApiVersionsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ApiVersionsResponse. +func (v *ApiVersionsResponse) Default() { + v.FinalizedFeaturesEpoch = -1 +} + +// NewApiVersionsResponse returns a default ApiVersionsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewApiVersionsResponse() ApiVersionsResponse { + var v ApiVersionsResponse + v.Default() + return v +} + +type CreateTopicsRequestTopicReplicaAssignment struct { + // Partition is a partition to create. + Partition int32 + + // Replicas are broker IDs the partition must exist on. + Replicas []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v5+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreateTopicsRequestTopicReplicaAssignment. +func (v *CreateTopicsRequestTopicReplicaAssignment) Default() { +} + +// NewCreateTopicsRequestTopicReplicaAssignment returns a default CreateTopicsRequestTopicReplicaAssignment +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreateTopicsRequestTopicReplicaAssignment() CreateTopicsRequestTopicReplicaAssignment { + var v CreateTopicsRequestTopicReplicaAssignment + v.Default() + return v +} + +type CreateTopicsRequestTopicConfig struct { + // Name is a topic level config key (e.g. segment.bytes). + Name string + + // Value is a topic level config value (e.g. 1073741824) + Value *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v5+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreateTopicsRequestTopicConfig. +func (v *CreateTopicsRequestTopicConfig) Default() { +} + +// NewCreateTopicsRequestTopicConfig returns a default CreateTopicsRequestTopicConfig +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreateTopicsRequestTopicConfig() CreateTopicsRequestTopicConfig { + var v CreateTopicsRequestTopicConfig + v.Default() + return v +} + +type CreateTopicsRequestTopic struct { + // Topic is a topic to create. + Topic string + + // NumPartitions is how many partitions to give a topic. This must + // be -1 if specifying partitions manually (see ReplicaAssignment) + // or, starting v4+, to use the broker default partitions. 
+ NumPartitions int32 + + // ReplicationFactor is how many replicas every partition must have. + // This must be -1 if specifying partitions manually (see ReplicaAssignment) + // or, starting v4+, to use the broker default replication factor. + ReplicationFactor int16 + + // ReplicaAssignment is an array to manually dicate replicas and their + // partitions for a topic. If using this, both ReplicationFactor and + // NumPartitions must be -1. + ReplicaAssignment []CreateTopicsRequestTopicReplicaAssignment + + // Configs is an array of key value config pairs for a topic. + // These correspond to Kafka Topic-Level Configs: http://kafka.apache.org/documentation/#topicconfigs. + Configs []CreateTopicsRequestTopicConfig + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v5+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreateTopicsRequestTopic. +func (v *CreateTopicsRequestTopic) Default() { +} + +// NewCreateTopicsRequestTopic returns a default CreateTopicsRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreateTopicsRequestTopic() CreateTopicsRequestTopic { + var v CreateTopicsRequestTopic + v.Default() + return v +} + +// CreateTopicsRequest creates Kafka topics. +// +// Version 4, introduced in Kafka 2.4.0, implies client support for +// creation defaults. See KIP-464. +// +// Version 5, also in 2.4.0, returns topic configs in the response (KIP-525). +type CreateTopicsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Topics is an array of topics to attempt to create. + Topics []CreateTopicsRequestTopic + + // TimeoutMillis is how long Kafka can wait before responding to this request. + // This field has no effect on Kafka's processing of the request; the request + // will continue to be processed if the timeout is reached. If the timeout is + // reached, Kafka will reply with a REQUEST_TIMED_OUT error. + // + // This field has a default of 60000. + TimeoutMillis int32 + + // ValidateOnly is makes this request a dry-run; everything is validated but + // no topics are actually created. + ValidateOnly bool // v1+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v5+ +} + +func (*CreateTopicsRequest) Key() int16 { return 19 } +func (*CreateTopicsRequest) MaxVersion() int16 { return 7 } +func (v *CreateTopicsRequest) SetVersion(version int16) { v.Version = version } +func (v *CreateTopicsRequest) GetVersion() int16 { return v.Version } +func (v *CreateTopicsRequest) IsFlexible() bool { return v.Version >= 5 } +func (v *CreateTopicsRequest) Timeout() int32 { return v.TimeoutMillis } +func (v *CreateTopicsRequest) SetTimeout(timeoutMillis int32) { v.TimeoutMillis = timeoutMillis } +func (v *CreateTopicsRequest) IsAdminRequest() {} +func (v *CreateTopicsRequest) ResponseKind() Response { + r := &CreateTopicsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
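+//
+// A small illustrative sketch of issuing this request (topic name, partition and
+// replica counts are arbitrary example values; "client" is any Requestor):
+//
+//	req := kmsg.NewPtrCreateTopicsRequest()
+//	topic := kmsg.NewCreateTopicsRequestTopic()
+//	topic.Topic = "example-topic"
+//	topic.NumPartitions = 3
+//	topic.ReplicationFactor = 1
+//	req.Topics = append(req.Topics, topic)
+//	resp, err := req.RequestWith(ctx, client)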
+func (v *CreateTopicsRequest) RequestWith(ctx context.Context, r Requestor) (*CreateTopicsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*CreateTopicsResponse) + return resp, err +} + +func (v *CreateTopicsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 5 + _ = isFlexible + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.NumPartitions + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ReplicationFactor + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ReplicaAssignment + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Replicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.Configs + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Value + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.TimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + if version >= 1 { + v := v.ValidateOnly + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *CreateTopicsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *CreateTopicsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *CreateTopicsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 5 + _ = isFlexible + s := v + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]CreateTopicsRequestTopic, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := b.Int32() + s.NumPartitions = v + } + { + v := b.Int16() + s.ReplicationFactor = v + } + { + v := s.ReplicaAssignment + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]CreateTopicsRequestTopicReplicaAssignment, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := s.Replicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Replicas = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.ReplicaAssignment = v + } + { + v := s.Configs + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]CreateTopicsRequestTopicConfig, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Value = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Configs = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + { + v := b.Int32() + s.TimeoutMillis = v + } + if version >= 1 { + v := b.Bool() + s.ValidateOnly = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrCreateTopicsRequest returns a pointer to a default CreateTopicsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrCreateTopicsRequest() *CreateTopicsRequest { + var v CreateTopicsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreateTopicsRequest. +func (v *CreateTopicsRequest) Default() { + v.TimeoutMillis = 60000 +} + +// NewCreateTopicsRequest returns a default CreateTopicsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreateTopicsRequest() CreateTopicsRequest { + var v CreateTopicsRequest + v.Default() + return v +} + +type CreateTopicsResponseTopicConfig struct { + // Name is the configuration name (e.g. segment.bytes). + Name string + + // Value is the value for this config key. If the key is sensitive, + // the value will be null. + Value *string + + // ReadOnly signifies whether this is not a dynamic config option. + ReadOnly bool + + // Source is where this config entry is from. 
See the documentation + // on DescribeConfigsRequest's Source for more details. + // + // This field has a default of -1. + Source int8 + + // IsSensitive signifies whether this is a sensitive config key, which + // is either a password or an unknown type. + IsSensitive bool + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v5+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreateTopicsResponseTopicConfig. +func (v *CreateTopicsResponseTopicConfig) Default() { + v.Source = -1 +} + +// NewCreateTopicsResponseTopicConfig returns a default CreateTopicsResponseTopicConfig +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreateTopicsResponseTopicConfig() CreateTopicsResponseTopicConfig { + var v CreateTopicsResponseTopicConfig + v.Default() + return v +} + +type CreateTopicsResponseTopic struct { + // Topic is the topic this response corresponds to. + Topic string + + // The unique topic ID. + TopicID [16]byte // v7+ + + // ErrorCode is the error code for an individual topic creation. + // + // NOT_CONTROLLER is returned if the request was not issued to a Kafka + // controller. + // + // TOPIC_AUTHORIZATION_FAILED is returned if the client is not authorized. + // + // INVALID_REQUEST is returned if the same topic occurred multiple times + // in the request. + // + // POLICY_VIOLATION is returned if the broker is using a + // create.topic.policy.class.name that returns a policy violation. + // + // INVALID_TOPIC_EXCEPTION if the topic collides with another topic when + // both topic's names' periods are replaced with underscores (e.g. + // topic.foo and topic_foo collide). + // + // TOPIC_ALREADY_EXISTS is returned if the topic already exists. + // + // INVALID_PARTITIONS is returned if the requested number of partitions is + // <= 0. + // + // INVALID_REPLICATION_FACTOR is returned if the requested replication + // factor is <= 0. + // + // INVALID_REPLICA_ASSIGNMENT is returned if not all partitions have the same + // number of replicas, or duplica replicas are assigned, or the partitions + // are not consecutive starting from 0. + // + // INVALID_CONFIG is returned if the requested topic config is invalid. + // to create a topic. + ErrorCode int16 + + // ErrorMessage is an informative message if the topic creation failed. + ErrorMessage *string // v1+ + + // ConfigErrorCode is non-zero if configs are unable to be returned. + // + // This is the first tagged field, introduced in version 5. As such, it is + // only possible to be present in v5+. + ConfigErrorCode int16 // tag 0 + + // NumPartitions is how many partitions were created for this topic. + // + // This field has a default of -1. + NumPartitions int32 // v5+ + + // ReplicationFactor is how many replicas every partition has for this topic. + // + // This field has a default of -1. + ReplicationFactor int16 // v5+ + + // Configs contains this topic's configuration. + Configs []CreateTopicsResponseTopicConfig // v5+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v5+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreateTopicsResponseTopic. 
+func (v *CreateTopicsResponseTopic) Default() { + v.NumPartitions = -1 + v.ReplicationFactor = -1 +} + +// NewCreateTopicsResponseTopic returns a default CreateTopicsResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreateTopicsResponseTopic() CreateTopicsResponseTopic { + var v CreateTopicsResponseTopic + v.Default() + return v +} + +// CreateTopicsResponse is returned from a CreateTopicsRequest. +type CreateTopicsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 3. + ThrottleMillis int32 // v2+ + + // Topics contains responses to the requested topic creations. + Topics []CreateTopicsResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v5+ +} + +func (*CreateTopicsResponse) Key() int16 { return 19 } +func (*CreateTopicsResponse) MaxVersion() int16 { return 7 } +func (v *CreateTopicsResponse) SetVersion(version int16) { v.Version = version } +func (v *CreateTopicsResponse) GetVersion() int16 { return v.Version } +func (v *CreateTopicsResponse) IsFlexible() bool { return v.Version >= 5 } +func (v *CreateTopicsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 3 } +func (v *CreateTopicsResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *CreateTopicsResponse) RequestKind() Request { return &CreateTopicsRequest{Version: v.Version} } + +func (v *CreateTopicsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 5 + _ = isFlexible + if version >= 2 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 7 { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if version >= 1 { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 5 { + v := v.NumPartitions + dst = kbin.AppendInt32(dst, v) + } + if version >= 5 { + v := v.ReplicationFactor + dst = kbin.AppendInt16(dst, v) + } + if version >= 5 { + v := v.Configs + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + for i := range v { + v := &v[i] + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Value + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.ReadOnly + dst = kbin.AppendBool(dst, v) + } + { + v := v.Source + dst = kbin.AppendInt8(dst, v) + } + { + v := v.IsSensitive + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = 
v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + var toEncode []uint32 + if v.ConfigErrorCode != 0 { + toEncode = append(toEncode, 0) + } + dst = kbin.AppendUvarint(dst, uint32(len(toEncode)+v.UnknownTags.Len())) + for _, tag := range toEncode { + switch tag { + case 0: + { + v := v.ConfigErrorCode + dst = kbin.AppendUvarint(dst, 0) + dst = kbin.AppendUvarint(dst, 2) + dst = kbin.AppendInt16(dst, v) + } + } + } + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *CreateTopicsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *CreateTopicsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *CreateTopicsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 5 + _ = isFlexible + s := v + if version >= 2 { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]CreateTopicsResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + if version >= 7 { + v := b.Uuid() + s.TopicID = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if version >= 1 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + if version >= 5 { + v := b.Int32() + s.NumPartitions = v + } + if version >= 5 { + v := b.Int16() + s.ReplicationFactor = v + } + if version >= 5 { + v := s.Configs + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 0 || l == 0 { + a = []CreateTopicsResponseTopicConfig{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]CreateTopicsResponseTopicConfig, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Value = v + } + { + v := b.Bool() + s.ReadOnly = v + } + { + v := b.Int8() + s.Source = v + } + { + v := b.Bool() + s.IsSensitive = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Configs = v + } + if isFlexible { + for i := b.Uvarint(); i > 0; i-- { + switch key := b.Uvarint(); key { + default: + s.UnknownTags.Set(key, b.Span(int(b.Uvarint()))) + case 0: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + v := b.Int16() + s.ConfigErrorCode = v + if err := b.Complete(); err != nil { + return err + } + } + } + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrCreateTopicsResponse returns a pointer to a default CreateTopicsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrCreateTopicsResponse() *CreateTopicsResponse { + var v CreateTopicsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreateTopicsResponse. +func (v *CreateTopicsResponse) Default() { +} + +// NewCreateTopicsResponse returns a default CreateTopicsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreateTopicsResponse() CreateTopicsResponse { + var v CreateTopicsResponse + v.Default() + return v +} + +type DeleteTopicsRequestTopic struct { + Topic *string + + TopicID [16]byte + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteTopicsRequestTopic. +func (v *DeleteTopicsRequestTopic) Default() { +} + +// NewDeleteTopicsRequestTopic returns a default DeleteTopicsRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteTopicsRequestTopic() DeleteTopicsRequestTopic { + var v DeleteTopicsRequestTopic + v.Default() + return v +} + +// DeleteTopicsRequest deletes Kafka topics. +type DeleteTopicsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Topics is an array of topics to delete. + TopicNames []string // v0-v5 + + // The name or topic ID of topics to delete. + Topics []DeleteTopicsRequestTopic // v6+ + + // TimeoutMillis is how long Kafka can wait before responding to this request. + // This field has no effect on Kafka's processing of the request; the request + // will continue to be processed if the timeout is reached. If the timeout is + // reached, Kafka will reply with a REQUEST_TIMED_OUT error. + // + // This field has a default of 15000. + TimeoutMillis int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v4+ +} + +func (*DeleteTopicsRequest) Key() int16 { return 20 } +func (*DeleteTopicsRequest) MaxVersion() int16 { return 6 } +func (v *DeleteTopicsRequest) SetVersion(version int16) { v.Version = version } +func (v *DeleteTopicsRequest) GetVersion() int16 { return v.Version } +func (v *DeleteTopicsRequest) IsFlexible() bool { return v.Version >= 4 } +func (v *DeleteTopicsRequest) Timeout() int32 { return v.TimeoutMillis } +func (v *DeleteTopicsRequest) SetTimeout(timeoutMillis int32) { v.TimeoutMillis = timeoutMillis } +func (v *DeleteTopicsRequest) IsAdminRequest() {} +func (v *DeleteTopicsRequest) ResponseKind() Response { + r := &DeleteTopicsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *DeleteTopicsRequest) RequestWith(ctx context.Context, r Requestor) (*DeleteTopicsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DeleteTopicsResponse) + return resp, err +} + +func (v *DeleteTopicsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + if version >= 0 && version <= 5 { + v := v.TopicNames + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } + if version >= 6 { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.TimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DeleteTopicsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DeleteTopicsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DeleteTopicsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + s := v + if version >= 0 && version <= 5 { + v := s.TopicNames + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]string, l)...) 
+ } + for i := int32(0); i < l; i++ { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + a[i] = v + } + v = a + s.TopicNames = v + } + if version >= 6 { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DeleteTopicsRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Topic = v + } + { + v := b.Uuid() + s.TopicID = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + { + v := b.Int32() + s.TimeoutMillis = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDeleteTopicsRequest returns a pointer to a default DeleteTopicsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDeleteTopicsRequest() *DeleteTopicsRequest { + var v DeleteTopicsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteTopicsRequest. +func (v *DeleteTopicsRequest) Default() { + v.TimeoutMillis = 15000 +} + +// NewDeleteTopicsRequest returns a default DeleteTopicsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteTopicsRequest() DeleteTopicsRequest { + var v DeleteTopicsRequest + v.Default() + return v +} + +type DeleteTopicsResponseTopic struct { + // Topic is the topic requested for deletion. + Topic *string + + // The topic ID requested for deletion. + TopicID [16]byte // v6+ + + // ErrorCode is the error code returned for an individual topic in + // deletion request. + // + // TOPIC_AUTHORIZATION_FAILED is returned if the client is not authorized + // to delete a topic. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the broker does not know of + // the topic. + // + // NOT_CONTROLLER is returned if the request was not issued to a Kafka + // controller. + // + // TOPIC_DELETION_DISABLED is returned for deletion requests version 3+ + // and brokers >= 2.1.0. INVALID_REQUEST is issued for request versions + // 0-2 against brokers >= 2.1.0. Otherwise, the request hangs until it + // times out. + // + // UNSUPPORTED_VERSION is returned when using topic IDs with a cluster + // that is not yet Kafka v2.8+. + // + // UNKNOWN_TOPIC_ID is returned when using topic IDs to a Kafka cluster + // v2.8+ and the topic ID is not found. + ErrorCode int16 + + // ErrorMessage is a message for an error. + ErrorMessage *string // v5+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteTopicsResponseTopic. +func (v *DeleteTopicsResponseTopic) Default() { +} + +// NewDeleteTopicsResponseTopic returns a default DeleteTopicsResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewDeleteTopicsResponseTopic() DeleteTopicsResponseTopic { + var v DeleteTopicsResponseTopic + v.Default() + return v +} + +// DeleteTopicsResponse is returned from a DeleteTopicsRequest. +// Version 3 added the TOPIC_DELETION_DISABLED error proposed in KIP-322 +// and introduced in Kafka 2.1.0. Prior, the request timed out. +type DeleteTopicsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 2. + ThrottleMillis int32 // v1+ + + // Topics contains responses for each topic requested for deletion. + Topics []DeleteTopicsResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +func (*DeleteTopicsResponse) Key() int16 { return 20 } +func (*DeleteTopicsResponse) MaxVersion() int16 { return 6 } +func (v *DeleteTopicsResponse) SetVersion(version int16) { v.Version = version } +func (v *DeleteTopicsResponse) GetVersion() int16 { return v.Version } +func (v *DeleteTopicsResponse) IsFlexible() bool { return v.Version >= 4 } +func (v *DeleteTopicsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 2 } +func (v *DeleteTopicsResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *DeleteTopicsResponse) RequestKind() Request { return &DeleteTopicsRequest{Version: v.Version} } + +func (v *DeleteTopicsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + if version >= 1 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if version < 6 { + var vv string + if v != nil { + vv = *v + } + { + v := vv + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } else { + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + } + if version >= 6 { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if version >= 5 { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DeleteTopicsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DeleteTopicsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DeleteTopicsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + s := v + if version >= 1 { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + 
} else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DeleteTopicsResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v *string + if version < 6 { + var vv string + if isFlexible { + if unsafe { + vv = b.UnsafeCompactString() + } else { + vv = b.CompactString() + } + } else { + if unsafe { + vv = b.UnsafeString() + } else { + vv = b.String() + } + } + v = &vv + } else { + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + } + s.Topic = v + } + if version >= 6 { + v := b.Uuid() + s.TopicID = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if version >= 5 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDeleteTopicsResponse returns a pointer to a default DeleteTopicsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDeleteTopicsResponse() *DeleteTopicsResponse { + var v DeleteTopicsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteTopicsResponse. +func (v *DeleteTopicsResponse) Default() { +} + +// NewDeleteTopicsResponse returns a default DeleteTopicsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteTopicsResponse() DeleteTopicsResponse { + var v DeleteTopicsResponse + v.Default() + return v +} + +type DeleteRecordsRequestTopicPartition struct { + // Partition is a partition to delete records from. + Partition int32 + + // Offset is the offset to set the partition's low watermark (start + // offset) to. After a successful response, all records before this + // offset are considered deleted and are no longer readable. + // + // To delete all records, use -1, which is mapped to the partition's + // current high watermark. + Offset int64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteRecordsRequestTopicPartition. +func (v *DeleteRecordsRequestTopicPartition) Default() { +} + +// NewDeleteRecordsRequestTopicPartition returns a default DeleteRecordsRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteRecordsRequestTopicPartition() DeleteRecordsRequestTopicPartition { + var v DeleteRecordsRequestTopicPartition + v.Default() + return v +} + +type DeleteRecordsRequestTopic struct { + // Topic is a topic to delete records from. + Topic string + + // Partitions contains partitions to delete records from. + Partitions []DeleteRecordsRequestTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. 
Calling this allows for future compatibility +// if new fields are added to DeleteRecordsRequestTopic. +func (v *DeleteRecordsRequestTopic) Default() { +} + +// NewDeleteRecordsRequestTopic returns a default DeleteRecordsRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteRecordsRequestTopic() DeleteRecordsRequestTopic { + var v DeleteRecordsRequestTopic + v.Default() + return v +} + +// DeleteRecordsRequest is an admin request to delete records from Kafka. +// This was added for KIP-107. +// +// To delete records, Kafka sets the LogStartOffset for partitions to +// the requested offset. All segments whose max partition is before the +// requested offset are deleted, and any records within the segment before +// the requested offset can no longer be read. +// +// This request must be issued to the correct brokers that own the partitions +// you intend to delete records for. +type DeleteRecordsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Topics contains topics for which to delete records from. + Topics []DeleteRecordsRequestTopic + + // TimeoutMillis is how long Kafka can wait before responding to this request. + // This field has no effect on Kafka's processing of the request; the request + // will continue to be processed if the timeout is reached. If the timeout is + // reached, Kafka will reply with a REQUEST_TIMED_OUT error. + // + // This field has a default of 15000. + TimeoutMillis int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*DeleteRecordsRequest) Key() int16 { return 21 } +func (*DeleteRecordsRequest) MaxVersion() int16 { return 2 } +func (v *DeleteRecordsRequest) SetVersion(version int16) { v.Version = version } +func (v *DeleteRecordsRequest) GetVersion() int16 { return v.Version } +func (v *DeleteRecordsRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *DeleteRecordsRequest) Timeout() int32 { return v.TimeoutMillis } +func (v *DeleteRecordsRequest) SetTimeout(timeoutMillis int32) { v.TimeoutMillis = timeoutMillis } +func (v *DeleteRecordsRequest) ResponseKind() Response { + r := &DeleteRecordsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
+func (v *DeleteRecordsRequest) RequestWith(ctx context.Context, r Requestor) (*DeleteRecordsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DeleteRecordsResponse) + return resp, err +} + +func (v *DeleteRecordsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Offset + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.TimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DeleteRecordsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DeleteRecordsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DeleteRecordsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DeleteRecordsRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DeleteRecordsRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int64() + s.Offset = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + { + v := b.Int32() + s.TimeoutMillis = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDeleteRecordsRequest returns a pointer to a default DeleteRecordsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDeleteRecordsRequest() *DeleteRecordsRequest { + var v DeleteRecordsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteRecordsRequest. 
+func (v *DeleteRecordsRequest) Default() { + v.TimeoutMillis = 15000 +} + +// NewDeleteRecordsRequest returns a default DeleteRecordsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteRecordsRequest() DeleteRecordsRequest { + var v DeleteRecordsRequest + v.Default() + return v +} + +type DeleteRecordsResponseTopicPartition struct { + // Partition is the partition this response corresponds to. + Partition int32 + + // LowWatermark is the new earliest offset for this partition. + LowWatermark int64 + + // ErrorCode is the error code returned for a given partition in + // the delete request. + // + // TOPIC_AUTHORIZATION_FAILED is returned for all partitions if the + // client is not authorized to delete records. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned for all partitions that + // the requested broker does not know of. + // + // NOT_LEADER_FOR_PARTITION is returned for partitions that the + // requested broker is not a leader of. + // + // OFFSET_OUT_OF_RANGE is returned if the requested offset is + // negative or higher than the current high watermark. + // + // POLICY_VIOLATION is returned if records cannot be deleted due to + // broker configuration. + // + // KAFKA_STORAGE_EXCEPTION is returned if the partition is in an + // offline log directory. + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteRecordsResponseTopicPartition. +func (v *DeleteRecordsResponseTopicPartition) Default() { +} + +// NewDeleteRecordsResponseTopicPartition returns a default DeleteRecordsResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteRecordsResponseTopicPartition() DeleteRecordsResponseTopicPartition { + var v DeleteRecordsResponseTopicPartition + v.Default() + return v +} + +type DeleteRecordsResponseTopic struct { + // Topic is the topic this response corresponds to. + Topic string + + // Partitions contains responses for each partition in a requested topic + // in the delete records request. + Partitions []DeleteRecordsResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteRecordsResponseTopic. +func (v *DeleteRecordsResponseTopic) Default() { +} + +// NewDeleteRecordsResponseTopic returns a default DeleteRecordsResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteRecordsResponseTopic() DeleteRecordsResponseTopic { + var v DeleteRecordsResponseTopic + v.Default() + return v +} + +// DeleteRecordsResponse is returned from a DeleteRecordsRequest. +type DeleteRecordsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // Topics contains responses for each topic in the delete records request. 
+ Topics []DeleteRecordsResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*DeleteRecordsResponse) Key() int16 { return 21 } +func (*DeleteRecordsResponse) MaxVersion() int16 { return 2 } +func (v *DeleteRecordsResponse) SetVersion(version int16) { v.Version = version } +func (v *DeleteRecordsResponse) GetVersion() int16 { return v.Version } +func (v *DeleteRecordsResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *DeleteRecordsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 1 } +func (v *DeleteRecordsResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *DeleteRecordsResponse) RequestKind() Request { + return &DeleteRecordsRequest{Version: v.Version} +} + +func (v *DeleteRecordsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LowWatermark + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DeleteRecordsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DeleteRecordsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DeleteRecordsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DeleteRecordsResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DeleteRecordsResponseTopicPartition, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int64() + s.LowWatermark = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDeleteRecordsResponse returns a pointer to a default DeleteRecordsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDeleteRecordsResponse() *DeleteRecordsResponse { + var v DeleteRecordsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteRecordsResponse. +func (v *DeleteRecordsResponse) Default() { +} + +// NewDeleteRecordsResponse returns a default DeleteRecordsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteRecordsResponse() DeleteRecordsResponse { + var v DeleteRecordsResponse + v.Default() + return v +} + +// InitProducerIDRequest initializes a producer ID for idempotent transactions, +// and if using transactions, a producer epoch. This is the first request +// necessary to begin idempotent producing or transactions. +// +// Note that you do not need to go to a txn coordinator if you are initializing +// a producer id without a transactional id. +type InitProducerIDRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // TransactionalID is the ID to use for transactions if using transactions. + TransactionalID *string + + // TransactionTimeoutMillis is how long a transaction is allowed before + // EndTxn is required. + // + // Note that this timeout only begins on the first AddPartitionsToTxn + // request. + TransactionTimeoutMillis int32 + + // ProducerID, added for KIP-360, is the current producer ID. This allows + // the client to potentially recover on UNKNOWN_PRODUCER_ID errors. + // + // This field has a default of -1. + ProducerID int64 // v3+ + + // The producer's current epoch. This will be checked against the producer + // epoch on the broker, and the request will return an error if they do not + // match. Also added for KIP-360. + // + // This field has a default of -1. + ProducerEpoch int16 // v3+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*InitProducerIDRequest) Key() int16 { return 22 } +func (*InitProducerIDRequest) MaxVersion() int16 { return 4 } +func (v *InitProducerIDRequest) SetVersion(version int16) { v.Version = version } +func (v *InitProducerIDRequest) GetVersion() int16 { return v.Version } +func (v *InitProducerIDRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *InitProducerIDRequest) IsTxnCoordinatorRequest() {} +func (v *InitProducerIDRequest) ResponseKind() Response { + r := &InitProducerIDResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
+func (v *InitProducerIDRequest) RequestWith(ctx context.Context, r Requestor) (*InitProducerIDResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*InitProducerIDResponse) + return resp, err +} + +func (v *InitProducerIDRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.TransactionalID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.TransactionTimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + if version >= 3 { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + if version >= 3 { + v := v.ProducerEpoch + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *InitProducerIDRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *InitProducerIDRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *InitProducerIDRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.TransactionalID = v + } + { + v := b.Int32() + s.TransactionTimeoutMillis = v + } + if version >= 3 { + v := b.Int64() + s.ProducerID = v + } + if version >= 3 { + v := b.Int16() + s.ProducerEpoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrInitProducerIDRequest returns a pointer to a default InitProducerIDRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrInitProducerIDRequest() *InitProducerIDRequest { + var v InitProducerIDRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to InitProducerIDRequest. +func (v *InitProducerIDRequest) Default() { + v.ProducerID = -1 + v.ProducerEpoch = -1 +} + +// NewInitProducerIDRequest returns a default InitProducerIDRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewInitProducerIDRequest() InitProducerIDRequest { + var v InitProducerIDRequest + v.Default() + return v +} + +// InitProducerIDResponse is returned for an InitProducerIDRequest. +type InitProducerIDResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // CLUSTER_AUTHORIZATION_FAILED is returned when not using transactions if + // the client is not authorized for idempotent_write on cluster. + // + // TRANSACTIONAL_ID_AUTHORIZATION_FAILED is returned when using transactions + // if the client is not authorized to write on transactional_id. 
+ // + // INVALID_REQUEST is returned if using transactions and the transactional id + // is an empty, non-null string + // + // COORDINATOR_LOAD_IN_PROGRESS is returned if the coordinator for this + // transactional ID is still loading. + // + // NOT_COORDINATOR is returned if the broker is not the coordinator for + // this transactional ID. + // + // INVALID_TRANSACTION_TIMEOUT is returned if using transactions and the timeout + // is equal to over over transaction.max.timeout.ms or under 0. + // + // CONCURRENT_TRANSACTIONS is returned if there is an ongoing transaction + // that is completing at the time this init is called. + ErrorCode int16 + + // ProducerID is the next producer ID that Kafka generated. This ID is used + // to ensure repeated produce requests do not result in duplicate records. + // + // This field has a default of -1. + ProducerID int64 + + // ProducerEpoch is the producer epoch to use for transactions. + ProducerEpoch int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*InitProducerIDResponse) Key() int16 { return 22 } +func (*InitProducerIDResponse) MaxVersion() int16 { return 4 } +func (v *InitProducerIDResponse) SetVersion(version int16) { v.Version = version } +func (v *InitProducerIDResponse) GetVersion() int16 { return v.Version } +func (v *InitProducerIDResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *InitProducerIDResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 1 } +func (v *InitProducerIDResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *InitProducerIDResponse) RequestKind() Request { + return &InitProducerIDRequest{Version: v.Version} +} + +func (v *InitProducerIDResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ProducerEpoch + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *InitProducerIDResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *InitProducerIDResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *InitProducerIDResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int64() + s.ProducerID = v + } + { + v := b.Int16() + s.ProducerEpoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrInitProducerIDResponse returns a pointer to a default InitProducerIDResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrInitProducerIDResponse() *InitProducerIDResponse { + var v InitProducerIDResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to InitProducerIDResponse. 
+func (v *InitProducerIDResponse) Default() { + v.ProducerID = -1 +} + +// NewInitProducerIDResponse returns a default InitProducerIDResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewInitProducerIDResponse() InitProducerIDResponse { + var v InitProducerIDResponse + v.Default() + return v +} + +type OffsetForLeaderEpochRequestTopicPartition struct { + // Partition is the number of a partition. + Partition int32 + + // CurrentLeaderEpoch, proposed in KIP-320 and introduced in Kafka 2.1.0, + // allows brokers to check if the client is fenced (has an out of date + // leader) or if the client is ahead of the broker. + // + // The initial leader epoch can be determined from a MetadataResponse. + // + // This field has a default of -1. + CurrentLeaderEpoch int32 // v2+ + + // LeaderEpoch is the epoch to fetch the end offset for. + LeaderEpoch int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetForLeaderEpochRequestTopicPartition. +func (v *OffsetForLeaderEpochRequestTopicPartition) Default() { + v.CurrentLeaderEpoch = -1 +} + +// NewOffsetForLeaderEpochRequestTopicPartition returns a default OffsetForLeaderEpochRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetForLeaderEpochRequestTopicPartition() OffsetForLeaderEpochRequestTopicPartition { + var v OffsetForLeaderEpochRequestTopicPartition + v.Default() + return v +} + +type OffsetForLeaderEpochRequestTopic struct { + // Topic is the name of a topic. + Topic string + + // Partitions are partitions within a topic to fetch leader epoch offsets for. + Partitions []OffsetForLeaderEpochRequestTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetForLeaderEpochRequestTopic. +func (v *OffsetForLeaderEpochRequestTopic) Default() { +} + +// NewOffsetForLeaderEpochRequestTopic returns a default OffsetForLeaderEpochRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetForLeaderEpochRequestTopic() OffsetForLeaderEpochRequestTopic { + var v OffsetForLeaderEpochRequestTopic + v.Default() + return v +} + +// OffsetForLeaderEpochRequest requests log end offsets for partitions. +// +// Version 2, proposed in KIP-320 and introduced in Kafka 2.1.0, can be used by +// consumers to perform more accurate offset resetting in the case of data loss. +// +// In support of version 2, this requires DESCRIBE on TOPIC. +type OffsetForLeaderEpochRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ReplicaID, added in support of KIP-392, is the broker ID of the follower, + // or -1 if this request is from a consumer. + // + // This field has a default of -2. + ReplicaID int32 // v3+ + + // Topics are topics to fetch leader epoch offsets for. + Topics []OffsetForLeaderEpochRequestTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v4+ +} + +func (*OffsetForLeaderEpochRequest) Key() int16 { return 23 } +func (*OffsetForLeaderEpochRequest) MaxVersion() int16 { return 4 } +func (v *OffsetForLeaderEpochRequest) SetVersion(version int16) { v.Version = version } +func (v *OffsetForLeaderEpochRequest) GetVersion() int16 { return v.Version } +func (v *OffsetForLeaderEpochRequest) IsFlexible() bool { return v.Version >= 4 } +func (v *OffsetForLeaderEpochRequest) ResponseKind() Response { + r := &OffsetForLeaderEpochResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *OffsetForLeaderEpochRequest) RequestWith(ctx context.Context, r Requestor) (*OffsetForLeaderEpochResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*OffsetForLeaderEpochResponse) + return resp, err +} + +func (v *OffsetForLeaderEpochRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + if version >= 3 { + v := v.ReplicaID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + if version >= 2 { + v := v.CurrentLeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *OffsetForLeaderEpochRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *OffsetForLeaderEpochRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *OffsetForLeaderEpochRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + s := v + if version >= 3 { + v := b.Int32() + s.ReplicaID = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetForLeaderEpochRequestTopic, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetForLeaderEpochRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + if version >= 2 { + v := b.Int32() + s.CurrentLeaderEpoch = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrOffsetForLeaderEpochRequest returns a pointer to a default OffsetForLeaderEpochRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrOffsetForLeaderEpochRequest() *OffsetForLeaderEpochRequest { + var v OffsetForLeaderEpochRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetForLeaderEpochRequest. +func (v *OffsetForLeaderEpochRequest) Default() { + v.ReplicaID = -2 +} + +// NewOffsetForLeaderEpochRequest returns a default OffsetForLeaderEpochRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetForLeaderEpochRequest() OffsetForLeaderEpochRequest { + var v OffsetForLeaderEpochRequest + v.Default() + return v +} + +type OffsetForLeaderEpochResponseTopicPartition struct { + // ErrorCode is the error code returned on request failure. + // + // TOPIC_AUTHORIZATION_FAILED is returned if the client does not have + // the necessary permissions to issue this request. + // + // KAFKA_STORAGE_ERROR is returned if the partition is offline. + // + // NOT_LEADER_FOR_PARTITION is returned if the broker knows of the partition + // but does not own it. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the broker does not know of the + // partition. + // + // FENCED_LEADER_EPOCH is returned if the client is using a current leader epoch + // older than the actual leader epoch. + // + // UNKNOWN_LEADER_EPOCH if returned if the client is using a current leader epoch + // that the actual leader does not know of. This could occur when the client + // has newer metadata than the broker when the broker just became the leader for + // a replica. + ErrorCode int16 + + // Partition is the partition this response is for. + Partition int32 + + // LeaderEpoch is similar to the requested leader epoch, but pairs with the + // next field. If the requested leader epoch is unknown, this is -1. If the + // requested epoch had no records produced during the requested epoch, this + // is the first prior epoch that had records. + // + // This field has a default of -1. + LeaderEpoch int32 // v1+ + + // EndOffset is either (1) just past the last recorded offset in the + // current partition if the broker leader has the same epoch as the + // leader epoch in the request, or (2) the beginning offset of the next + // epoch if the leader is past the requested epoch. 
The second scenario + // can be seen as equivalent to the first: the beginning offset of the + // next epoch is just past the final offset of the prior epoch. + // + // (2) allows consumers to detect data loss: if the consumer consumed + // past the end offset that is returned, then the consumer should reset + // to the returned offset and the consumer knows everything past the end + // offset was lost. + // + // With the prior field, consumers know that at this offset, the broker + // either has no more records (consumer is caught up), or the broker + // transitioned to a new epoch. + // + // This field has a default of -1. + EndOffset int64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetForLeaderEpochResponseTopicPartition. +func (v *OffsetForLeaderEpochResponseTopicPartition) Default() { + v.LeaderEpoch = -1 + v.EndOffset = -1 +} + +// NewOffsetForLeaderEpochResponseTopicPartition returns a default OffsetForLeaderEpochResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetForLeaderEpochResponseTopicPartition() OffsetForLeaderEpochResponseTopicPartition { + var v OffsetForLeaderEpochResponseTopicPartition + v.Default() + return v +} + +type OffsetForLeaderEpochResponseTopic struct { + // Topic is the topic this response corresponds to. + Topic string + + // Partitions are responses to partitions in a topic in the request. + Partitions []OffsetForLeaderEpochResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetForLeaderEpochResponseTopic. +func (v *OffsetForLeaderEpochResponseTopic) Default() { +} + +// NewOffsetForLeaderEpochResponseTopic returns a default OffsetForLeaderEpochResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetForLeaderEpochResponseTopic() OffsetForLeaderEpochResponseTopic { + var v OffsetForLeaderEpochResponseTopic + v.Default() + return v +} + +// OffsetForLeaderEpochResponse is returned from an OffsetForLeaderEpochRequest. +type OffsetForLeaderEpochResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 // v2+ + + // Topics are responses to topics in the request. + Topics []OffsetForLeaderEpochResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v4+ +} + +func (*OffsetForLeaderEpochResponse) Key() int16 { return 23 } +func (*OffsetForLeaderEpochResponse) MaxVersion() int16 { return 4 } +func (v *OffsetForLeaderEpochResponse) SetVersion(version int16) { v.Version = version } +func (v *OffsetForLeaderEpochResponse) GetVersion() int16 { return v.Version } +func (v *OffsetForLeaderEpochResponse) IsFlexible() bool { return v.Version >= 4 } +func (v *OffsetForLeaderEpochResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 0 +} + +func (v *OffsetForLeaderEpochResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *OffsetForLeaderEpochResponse) RequestKind() Request { + return &OffsetForLeaderEpochRequest{Version: v.Version} +} + +func (v *OffsetForLeaderEpochResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + if version >= 2 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + if version >= 1 { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.EndOffset + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *OffsetForLeaderEpochResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *OffsetForLeaderEpochResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *OffsetForLeaderEpochResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + s := v + if version >= 2 { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetForLeaderEpochResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetForLeaderEpochResponseTopicPartition, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int32() + s.Partition = v + } + if version >= 1 { + v := b.Int32() + s.LeaderEpoch = v + } + { + v := b.Int64() + s.EndOffset = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrOffsetForLeaderEpochResponse returns a pointer to a default OffsetForLeaderEpochResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrOffsetForLeaderEpochResponse() *OffsetForLeaderEpochResponse { + var v OffsetForLeaderEpochResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetForLeaderEpochResponse. +func (v *OffsetForLeaderEpochResponse) Default() { +} + +// NewOffsetForLeaderEpochResponse returns a default OffsetForLeaderEpochResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetForLeaderEpochResponse() OffsetForLeaderEpochResponse { + var v OffsetForLeaderEpochResponse + v.Default() + return v +} + +type AddPartitionsToTxnRequestTopic struct { + // Topic is a topic name. + Topic string + + // Partitions are partitions within a topic to add as part of the producer + // side of a transaction. + Partitions []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AddPartitionsToTxnRequestTopic. +func (v *AddPartitionsToTxnRequestTopic) Default() { +} + +// NewAddPartitionsToTxnRequestTopic returns a default AddPartitionsToTxnRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewAddPartitionsToTxnRequestTopic() AddPartitionsToTxnRequestTopic { + var v AddPartitionsToTxnRequestTopic + v.Default() + return v +} + +type AddPartitionsToTxnRequestTransactionTopic struct { + Topic string + + Partitions []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AddPartitionsToTxnRequestTransactionTopic. +func (v *AddPartitionsToTxnRequestTransactionTopic) Default() { +} + +// NewAddPartitionsToTxnRequestTransactionTopic returns a default AddPartitionsToTxnRequestTransactionTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewAddPartitionsToTxnRequestTransactionTopic() AddPartitionsToTxnRequestTransactionTopic { + var v AddPartitionsToTxnRequestTransactionTopic + v.Default() + return v +} + +type AddPartitionsToTxnRequestTransaction struct { + TransactionalID string + + ProducerID int64 + + ProducerEpoch int16 + + // VerifyOnly signifies if we want to check if the partition is in the + // transaction rather than add it. + VerifyOnly bool + + Topics []AddPartitionsToTxnRequestTransactionTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AddPartitionsToTxnRequestTransaction. 
+func (v *AddPartitionsToTxnRequestTransaction) Default() { +} + +// NewAddPartitionsToTxnRequestTransaction returns a default AddPartitionsToTxnRequestTransaction +// This is a shortcut for creating a struct and calling Default yourself. +func NewAddPartitionsToTxnRequestTransaction() AddPartitionsToTxnRequestTransaction { + var v AddPartitionsToTxnRequestTransaction + v.Default() + return v +} + +// AddPartitionsToTxnRequest begins the producer side of a transaction for all +// partitions in the request. Before producing any records to a partition in +// the transaction, that partition must have been added to the transaction with +// this request. +// +// Versions 3 and below are exclusively used by clients and versions 4 and +// above are used by brokers. +// +// Version 4 adds VerifyOnly field to check if partitions are already in +// transaction and adds support to batch multiple transactions. +type AddPartitionsToTxnRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // TransactionalID is the transactional ID to use for this request. + TransactionalID string // v0-v3 + + // ProducerID is the producer ID of the client for this transactional ID + // as received from InitProducerID. + ProducerID int64 // v0-v3 + + // ProducerEpoch is the producer epoch of the client for this transactional ID + // as received from InitProducerID. + ProducerEpoch int16 // v0-v3 + + // Topics are topics to add as part of the producer side of a transaction. + Topics []AddPartitionsToTxnRequestTopic // v0-v3 + + // The list of transactions to add partitions to, for v4+, for brokers only. + // The fields in this are batch broker requests that duplicate the above fields + // and thus are undocumented (except VerifyOnly, which is new). + Transactions []AddPartitionsToTxnRequestTransaction // v4+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +func (*AddPartitionsToTxnRequest) Key() int16 { return 24 } +func (*AddPartitionsToTxnRequest) MaxVersion() int16 { return 4 } +func (v *AddPartitionsToTxnRequest) SetVersion(version int16) { v.Version = version } +func (v *AddPartitionsToTxnRequest) GetVersion() int16 { return v.Version } +func (v *AddPartitionsToTxnRequest) IsFlexible() bool { return v.Version >= 3 } +func (v *AddPartitionsToTxnRequest) IsTxnCoordinatorRequest() {} +func (v *AddPartitionsToTxnRequest) ResponseKind() Response { + r := &AddPartitionsToTxnResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
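The request type above follows the same generated pattern as every other kmsg message: choose a version with SetVersion, then AppendTo emits the wire-format body and ReadFrom parses one. A minimal round-trip sketch of that pattern is below; the transactional ID, topic, and partition values are made up for illustration and are not taken from this change.

package main

import (
	"fmt"

	"github.com/twmb/franz-go/pkg/kmsg"
)

func main() {
	// Build a request; all identifiers below are example values.
	req := kmsg.NewPtrAddPartitionsToTxnRequest()
	req.SetVersion(3) // v3+ uses the flexible (compact/tagged) encoding
	req.TransactionalID = "example-txn"
	req.ProducerID = 1000
	req.ProducerEpoch = 0

	topic := kmsg.NewAddPartitionsToTxnRequestTopic()
	topic.Topic = "example-topic"
	topic.Partitions = []int32{0, 1, 2}
	req.Topics = append(req.Topics, topic)

	// AppendTo emits the request body (without the Kafka request header).
	raw := req.AppendTo(nil)

	// ReadFrom parses the same bytes back; the version must be set first,
	// since the encoding is version dependent.
	parsed := kmsg.NewPtrAddPartitionsToTxnRequest()
	parsed.SetVersion(3)
	if err := parsed.ReadFrom(raw); err != nil {
		panic(err)
	}
	fmt.Println(parsed.TransactionalID, parsed.Topics[0].Topic, parsed.Topics[0].Partitions)
}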
+func (v *AddPartitionsToTxnRequest) RequestWith(ctx context.Context, r Requestor) (*AddPartitionsToTxnResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*AddPartitionsToTxnResponse) + return resp, err +} + +func (v *AddPartitionsToTxnRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + if version >= 0 && version <= 3 { + v := v.TransactionalID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 0 && version <= 3 { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + if version >= 0 && version <= 3 { + v := v.ProducerEpoch + dst = kbin.AppendInt16(dst, v) + } + if version >= 0 && version <= 3 { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 4 { + v := v.Transactions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.TransactionalID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ProducerEpoch + dst = kbin.AppendInt16(dst, v) + } + { + v := v.VerifyOnly + dst = kbin.AppendBool(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AddPartitionsToTxnRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AddPartitionsToTxnRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AddPartitionsToTxnRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + if version >= 0 && version <= 3 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + 
s.TransactionalID = v + } + if version >= 0 && version <= 3 { + v := b.Int64() + s.ProducerID = v + } + if version >= 0 && version <= 3 { + v := b.Int16() + s.ProducerEpoch = v + } + if version >= 0 && version <= 3 { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AddPartitionsToTxnRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if version >= 4 { + v := s.Transactions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AddPartitionsToTxnRequestTransaction, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.TransactionalID = v + } + { + v := b.Int64() + s.ProducerID = v + } + { + v := b.Int16() + s.ProducerEpoch = v + } + { + v := b.Bool() + s.VerifyOnly = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AddPartitionsToTxnRequestTransactionTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Transactions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAddPartitionsToTxnRequest returns a pointer to a default AddPartitionsToTxnRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAddPartitionsToTxnRequest() *AddPartitionsToTxnRequest { + var v AddPartitionsToTxnRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AddPartitionsToTxnRequest. 
+func (v *AddPartitionsToTxnRequest) Default() { +} + +// NewAddPartitionsToTxnRequest returns a default AddPartitionsToTxnRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewAddPartitionsToTxnRequest() AddPartitionsToTxnRequest { + var v AddPartitionsToTxnRequest + v.Default() + return v +} + +type AddPartitionsToTxnResponseTransactionTopicPartition struct { + Partition int32 + + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AddPartitionsToTxnResponseTransactionTopicPartition. +func (v *AddPartitionsToTxnResponseTransactionTopicPartition) Default() { +} + +// NewAddPartitionsToTxnResponseTransactionTopicPartition returns a default AddPartitionsToTxnResponseTransactionTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewAddPartitionsToTxnResponseTransactionTopicPartition() AddPartitionsToTxnResponseTransactionTopicPartition { + var v AddPartitionsToTxnResponseTransactionTopicPartition + v.Default() + return v +} + +type AddPartitionsToTxnResponseTransactionTopic struct { + Topic string + + Partitions []AddPartitionsToTxnResponseTransactionTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AddPartitionsToTxnResponseTransactionTopic. +func (v *AddPartitionsToTxnResponseTransactionTopic) Default() { +} + +// NewAddPartitionsToTxnResponseTransactionTopic returns a default AddPartitionsToTxnResponseTransactionTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewAddPartitionsToTxnResponseTransactionTopic() AddPartitionsToTxnResponseTransactionTopic { + var v AddPartitionsToTxnResponseTransactionTopic + v.Default() + return v +} + +type AddPartitionsToTxnResponseTransaction struct { + // The transactional id corresponding to the transaction. + TransactionalID string + + Topics []AddPartitionsToTxnResponseTransactionTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AddPartitionsToTxnResponseTransaction. +func (v *AddPartitionsToTxnResponseTransaction) Default() { +} + +// NewAddPartitionsToTxnResponseTransaction returns a default AddPartitionsToTxnResponseTransaction +// This is a shortcut for creating a struct and calling Default yourself. +func NewAddPartitionsToTxnResponseTransaction() AddPartitionsToTxnResponseTransaction { + var v AddPartitionsToTxnResponseTransaction + v.Default() + return v +} + +type AddPartitionsToTxnResponseTopicPartition struct { + // Partition is a partition being responded to. + Partition int32 + + // ErrorCode is any error for this topic/partition commit. + // + // TRANSACTIONAL_ID_AUTHORIZATION_FAILED is returned if the client is + // not authorized for write with transactional IDs with the requested + // transactional ID. + // + // TOPIC_AUTHORIZATION_FAILED is returned for all topics that the client + // is not authorized to write to. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned for all topics or partitions + // that the broker does not know of. 
+ // + // OPERATION_NOT_ATTEMPTED is returned if any of the above errors occur + // for all partitions that did not have the above errors. + // + // INVALID_REQUEST is returned if the transactional ID is invalid. + // + // COORDINATOR_LOAD_IN_PROGRESS is returned if the coordinator for this + // transactional ID is still loading. + // + // NOT_COORDINATOR is returned if the broker is not the coordinator for + // this transactional ID. + // + // INVALID_PRODUCER_ID_MAPPING is returned if the produce request used + // a producer ID that is not tied to the transactional ID (i.e., mismatch + // from what was returned from InitProducerID). + // + // INVALID_PRODUCER_EPOCH is returned if the requested epoch does not match + // the broker epoch for this transactional ID. + // + // CONCURRENT_TRANSACTIONS is returned if there is an ongoing transaction for + // this transactional ID, if the producer ID and epoch matches the broker's. + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AddPartitionsToTxnResponseTopicPartition. +func (v *AddPartitionsToTxnResponseTopicPartition) Default() { +} + +// NewAddPartitionsToTxnResponseTopicPartition returns a default AddPartitionsToTxnResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewAddPartitionsToTxnResponseTopicPartition() AddPartitionsToTxnResponseTopicPartition { + var v AddPartitionsToTxnResponseTopicPartition + v.Default() + return v +} + +type AddPartitionsToTxnResponseTopic struct { + // Topic is a topic being responded to. + Topic string + + // Partitions are responses to partitions in the request. + Partitions []AddPartitionsToTxnResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AddPartitionsToTxnResponseTopic. +func (v *AddPartitionsToTxnResponseTopic) Default() { +} + +// NewAddPartitionsToTxnResponseTopic returns a default AddPartitionsToTxnResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewAddPartitionsToTxnResponseTopic() AddPartitionsToTxnResponseTopic { + var v AddPartitionsToTxnResponseTopic + v.Default() + return v +} + +// AddPartitionsToTxnResponse is a response to an AddPartitionsToTxnRequest. +type AddPartitionsToTxnResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // The response top level error code. + ErrorCode int16 // v4+ + + // Results categorized by transactional ID, v4+ only, for brokers only. + // The fields duplicate v3 and below fields (except TransactionalID) and + // are left undocumented. + Transactions []AddPartitionsToTxnResponseTransaction // v4+ + + // Topics are responses to topics in the request. + Topics []AddPartitionsToTxnResponseTopic // v0-v3 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v3+ +} + +func (*AddPartitionsToTxnResponse) Key() int16 { return 24 } +func (*AddPartitionsToTxnResponse) MaxVersion() int16 { return 4 } +func (v *AddPartitionsToTxnResponse) SetVersion(version int16) { v.Version = version } +func (v *AddPartitionsToTxnResponse) GetVersion() int16 { return v.Version } +func (v *AddPartitionsToTxnResponse) IsFlexible() bool { return v.Version >= 3 } +func (v *AddPartitionsToTxnResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 1 +} + +func (v *AddPartitionsToTxnResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *AddPartitionsToTxnResponse) RequestKind() Request { + return &AddPartitionsToTxnRequest{Version: v.Version} +} + +func (v *AddPartitionsToTxnResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + if version >= 4 { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if version >= 4 { + v := v.Transactions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.TransactionalID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 0 && version <= 3 { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AddPartitionsToTxnResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v 
*AddPartitionsToTxnResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AddPartitionsToTxnResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + if version >= 4 { + v := b.Int16() + s.ErrorCode = v + } + if version >= 4 { + v := s.Transactions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AddPartitionsToTxnResponseTransaction, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.TransactionalID = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AddPartitionsToTxnResponseTransactionTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AddPartitionsToTxnResponseTransactionTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Transactions = v + } + if version >= 0 && version <= 3 { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AddPartitionsToTxnResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AddPartitionsToTxnResponseTopicPartition, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAddPartitionsToTxnResponse returns a pointer to a default AddPartitionsToTxnResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAddPartitionsToTxnResponse() *AddPartitionsToTxnResponse { + var v AddPartitionsToTxnResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AddPartitionsToTxnResponse. +func (v *AddPartitionsToTxnResponse) Default() { +} + +// NewAddPartitionsToTxnResponse returns a default AddPartitionsToTxnResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewAddPartitionsToTxnResponse() AddPartitionsToTxnResponse { + var v AddPartitionsToTxnResponse + v.Default() + return v +} + +// AddOffsetsToTxnRequest is a request that ties produced records to what group +// is being consumed for the transaction. +// +// This request must be called before TxnOffsetCommitRequest. +// +// Internally, this request simply adds the __consumer_offsets topic as a +// partition for this transaction with AddPartitionsToTxn for the partition +// in that topic that contains the group. +type AddOffsetsToTxnRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // TransactionalID is the transactional ID to use for this request. + TransactionalID string + + // ProducerID is the producer ID of the client for this transactional ID + // as received from InitProducerID. + ProducerID int64 + + // ProducerEpoch is the producer epoch of the client for this transactional ID + // as received from InitProducerID. + ProducerEpoch int16 + + // Group is the group to tie this transaction to. + Group string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +func (*AddOffsetsToTxnRequest) Key() int16 { return 25 } +func (*AddOffsetsToTxnRequest) MaxVersion() int16 { return 3 } +func (v *AddOffsetsToTxnRequest) SetVersion(version int16) { v.Version = version } +func (v *AddOffsetsToTxnRequest) GetVersion() int16 { return v.Version } +func (v *AddOffsetsToTxnRequest) IsFlexible() bool { return v.Version >= 3 } +func (v *AddOffsetsToTxnRequest) IsTxnCoordinatorRequest() {} +func (v *AddOffsetsToTxnRequest) ResponseKind() Response { + r := &AddOffsetsToTxnResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
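The transactional request types generated in this file all implement the package's Request interface (the RequestKind/ResponseKind signatures above refer to it), so version and feature checks can be written generically. A short sketch under that assumption, using only the accessors these types define:

package main

import (
	"fmt"

	"github.com/twmb/franz-go/pkg/kmsg"
)

func main() {
	// Inspect API key, maximum supported version, and whether the maximum
	// version uses the flexible encoding, for each transactional request.
	reqs := []kmsg.Request{
		kmsg.NewPtrAddPartitionsToTxnRequest(), // key 24
		kmsg.NewPtrAddOffsetsToTxnRequest(),    // key 25
		kmsg.NewPtrEndTxnRequest(),             // key 26
	}
	for _, r := range reqs {
		r.SetVersion(r.MaxVersion())
		fmt.Printf("api key %d: max version %d, flexible at max: %v\n",
			r.Key(), r.MaxVersion(), r.IsFlexible())
	}
}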
+func (v *AddOffsetsToTxnRequest) RequestWith(ctx context.Context, r Requestor) (*AddOffsetsToTxnResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*AddOffsetsToTxnResponse) + return resp, err +} + +func (v *AddOffsetsToTxnRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + { + v := v.TransactionalID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ProducerEpoch + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AddOffsetsToTxnRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AddOffsetsToTxnRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AddOffsetsToTxnRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.TransactionalID = v + } + { + v := b.Int64() + s.ProducerID = v + } + { + v := b.Int16() + s.ProducerEpoch = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Group = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAddOffsetsToTxnRequest returns a pointer to a default AddOffsetsToTxnRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAddOffsetsToTxnRequest() *AddOffsetsToTxnRequest { + var v AddOffsetsToTxnRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AddOffsetsToTxnRequest. +func (v *AddOffsetsToTxnRequest) Default() { +} + +// NewAddOffsetsToTxnRequest returns a default AddOffsetsToTxnRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewAddOffsetsToTxnRequest() AddOffsetsToTxnRequest { + var v AddOffsetsToTxnRequest + v.Default() + return v +} + +// AddOffsetsToTxnResponse is a response to an AddOffsetsToTxnRequest. +type AddOffsetsToTxnResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // ErrorCode is any error for this topic/partition commit. + // + // TRANSACTIONAL_ID_AUTHORIZATION_FAILED is returned if the client is + // not authorized for write with transactional IDs with the requested + // transactional ID. 
+ // + // GROUP_AUTHORIZATION_FAILED is returned if the client is not authorized + // to read group with the requested group id. + // + // This also can return any error that AddPartitionsToTxn returns. + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +func (*AddOffsetsToTxnResponse) Key() int16 { return 25 } +func (*AddOffsetsToTxnResponse) MaxVersion() int16 { return 3 } +func (v *AddOffsetsToTxnResponse) SetVersion(version int16) { v.Version = version } +func (v *AddOffsetsToTxnResponse) GetVersion() int16 { return v.Version } +func (v *AddOffsetsToTxnResponse) IsFlexible() bool { return v.Version >= 3 } +func (v *AddOffsetsToTxnResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 1 } +func (v *AddOffsetsToTxnResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *AddOffsetsToTxnResponse) RequestKind() Request { + return &AddOffsetsToTxnRequest{Version: v.Version} +} + +func (v *AddOffsetsToTxnResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AddOffsetsToTxnResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AddOffsetsToTxnResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AddOffsetsToTxnResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAddOffsetsToTxnResponse returns a pointer to a default AddOffsetsToTxnResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAddOffsetsToTxnResponse() *AddOffsetsToTxnResponse { + var v AddOffsetsToTxnResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AddOffsetsToTxnResponse. +func (v *AddOffsetsToTxnResponse) Default() { +} + +// NewAddOffsetsToTxnResponse returns a default AddOffsetsToTxnResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewAddOffsetsToTxnResponse() AddOffsetsToTxnResponse { + var v AddOffsetsToTxnResponse + v.Default() + return v +} + +// EndTxnRequest ends a transaction. This should be called after +// TxnOffsetCommitRequest. +type EndTxnRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // TransactionalID is the transactional ID to use for this request. + TransactionalID string + + // ProducerID is the producer ID of the client for this transactional ID + // as received from InitProducerID. + ProducerID int64 + + // ProducerEpoch is the producer epoch of the client for this transactional ID + // as received from InitProducerID. + ProducerEpoch int16 + + // Commit is whether to commit this transaction: true for yes, false for abort. + Commit bool + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v3+ +} + +func (*EndTxnRequest) Key() int16 { return 26 } +func (*EndTxnRequest) MaxVersion() int16 { return 3 } +func (v *EndTxnRequest) SetVersion(version int16) { v.Version = version } +func (v *EndTxnRequest) GetVersion() int16 { return v.Version } +func (v *EndTxnRequest) IsFlexible() bool { return v.Version >= 3 } +func (v *EndTxnRequest) IsTxnCoordinatorRequest() {} +func (v *EndTxnRequest) ResponseKind() Response { + r := &EndTxnResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *EndTxnRequest) RequestWith(ctx context.Context, r Requestor) (*EndTxnResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*EndTxnResponse) + return resp, err +} + +func (v *EndTxnRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + { + v := v.TransactionalID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ProducerEpoch + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Commit + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *EndTxnRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *EndTxnRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *EndTxnRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.TransactionalID = v + } + { + v := b.Int64() + s.ProducerID = v + } + { + v := b.Int16() + s.ProducerEpoch = v + } + { + v := b.Bool() + s.Commit = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrEndTxnRequest returns a pointer to a default EndTxnRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrEndTxnRequest() *EndTxnRequest { + var v EndTxnRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to EndTxnRequest. +func (v *EndTxnRequest) Default() { +} + +// NewEndTxnRequest returns a default EndTxnRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewEndTxnRequest() EndTxnRequest { + var v EndTxnRequest + v.Default() + return v +} + +// EndTxnResponse is a response for an EndTxnRequest. +type EndTxnResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. 
+ ThrottleMillis int32 + + // ErrorCode is any error for this topic/partition commit. + // + // TRANSACTIONAL_ID_AUTHORIZATION_FAILED is returned if the client is + // not authorized for write with transactional IDs with the requested + // transactional ID. + // + // INVALID_REQUEST is returned if the transactional ID is invalid. + // + // INVALID_PRODUCER_ID_MAPPING is returned if the produce request used + // a producer ID that is not tied to the transactional ID (i.e., mismatch + // from what was returned from InitProducerID). + // + // INVALID_PRODUCER_EPOCH is returned if the requested epoch does not match + // the broker epoch for this transactional ID. + // + // CONCURRENT_TRANSACTIONS is returned if there is an ongoing transaction for + // this transactional ID, if the producer ID and epoch matches the broker's. + // + // INVALID_TXN_STATE is returned if this request is attempted at the wrong + // time (given the order of how transaction requests should go). + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +func (*EndTxnResponse) Key() int16 { return 26 } +func (*EndTxnResponse) MaxVersion() int16 { return 3 } +func (v *EndTxnResponse) SetVersion(version int16) { v.Version = version } +func (v *EndTxnResponse) GetVersion() int16 { return v.Version } +func (v *EndTxnResponse) IsFlexible() bool { return v.Version >= 3 } +func (v *EndTxnResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 1 } +func (v *EndTxnResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *EndTxnResponse) RequestKind() Request { return &EndTxnRequest{Version: v.Version} } + +func (v *EndTxnResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *EndTxnResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *EndTxnResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *EndTxnResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrEndTxnResponse returns a pointer to a default EndTxnResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrEndTxnResponse() *EndTxnResponse { + var v EndTxnResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to EndTxnResponse. +func (v *EndTxnResponse) Default() { +} + +// NewEndTxnResponse returns a default EndTxnResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewEndTxnResponse() EndTxnResponse { + var v EndTxnResponse + v.Default() + return v +} + +type WriteTxnMarkersRequestMarkerTopic struct { + // Topic is the name of the topic to write markers for. + Topic string + + // Partitions contains partitions to write markers for. 
+ Partitions []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to WriteTxnMarkersRequestMarkerTopic. +func (v *WriteTxnMarkersRequestMarkerTopic) Default() { +} + +// NewWriteTxnMarkersRequestMarkerTopic returns a default WriteTxnMarkersRequestMarkerTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewWriteTxnMarkersRequestMarkerTopic() WriteTxnMarkersRequestMarkerTopic { + var v WriteTxnMarkersRequestMarkerTopic + v.Default() + return v +} + +type WriteTxnMarkersRequestMarker struct { + // ProducerID is the current producer ID to use when writing a marker. + ProducerID int64 + + // ProducerEpoch is the current producer epoch to use when writing a + // marker. + ProducerEpoch int16 + + // Committed is true if this marker is for a committed transaction, + // otherwise false if this is for an aborted transaction. + Committed bool + + // Topics contains the topics we are writing markers for. + Topics []WriteTxnMarkersRequestMarkerTopic + + // CoordinatorEpoch is the current epoch of the transaction coordinator we + // are writing a marker to. This is used to detect fenced writers. + CoordinatorEpoch int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to WriteTxnMarkersRequestMarker. +func (v *WriteTxnMarkersRequestMarker) Default() { +} + +// NewWriteTxnMarkersRequestMarker returns a default WriteTxnMarkersRequestMarker +// This is a shortcut for creating a struct and calling Default yourself. +func NewWriteTxnMarkersRequestMarker() WriteTxnMarkersRequestMarker { + var v WriteTxnMarkersRequestMarker + v.Default() + return v +} + +// WriteTxnMarkersRequest is a broker-to-broker request that Kafka uses to +// finish transactions. +type WriteTxnMarkersRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Markers contains transactional markers to be written. + Markers []WriteTxnMarkersRequestMarker + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +func (*WriteTxnMarkersRequest) Key() int16 { return 27 } +func (*WriteTxnMarkersRequest) MaxVersion() int16 { return 1 } +func (v *WriteTxnMarkersRequest) SetVersion(version int16) { v.Version = version } +func (v *WriteTxnMarkersRequest) GetVersion() int16 { return v.Version } +func (v *WriteTxnMarkersRequest) IsFlexible() bool { return v.Version >= 1 } +func (v *WriteTxnMarkersRequest) ResponseKind() Response { + r := &WriteTxnMarkersResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
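ResponseKind, defined for each request above, returns a response struct pre-set to the request's version, which is convenient when a reply has to be synthesized for a given request. A minimal sketch using the WriteTxnMarkers pair; the producer ID is a made-up example value.

package main

import (
	"fmt"

	"github.com/twmb/franz-go/pkg/kmsg"
)

func main() {
	req := kmsg.NewPtrWriteTxnMarkersRequest()
	req.SetVersion(1) // v1 is the flexible version of this broker-to-broker API

	// ResponseKind yields a WriteTxnMarkersResponse at the same version.
	resp, ok := req.ResponseKind().(*kmsg.WriteTxnMarkersResponse)
	if !ok {
		panic("unexpected response kind")
	}

	marker := kmsg.NewWriteTxnMarkersResponseMarker()
	marker.ProducerID = 1000 // example producer ID, mirroring a request marker
	resp.Markers = append(resp.Markers, marker)

	// The response serializes with AppendTo just like the request types.
	fmt.Println(resp.GetVersion(), len(resp.Markers), len(resp.AppendTo(nil)) > 0)
}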
+func (v *WriteTxnMarkersRequest) RequestWith(ctx context.Context, r Requestor) (*WriteTxnMarkersResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*WriteTxnMarkersResponse) + return resp, err +} + +func (v *WriteTxnMarkersRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + { + v := v.Markers + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ProducerEpoch + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Committed + dst = kbin.AppendBool(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.CoordinatorEpoch + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *WriteTxnMarkersRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *WriteTxnMarkersRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *WriteTxnMarkersRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + s := v + { + v := s.Markers + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]WriteTxnMarkersRequestMarker, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int64() + s.ProducerID = v + } + { + v := b.Int16() + s.ProducerEpoch = v + } + { + v := b.Bool() + s.Committed = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]WriteTxnMarkersRequestMarkerTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + { + v := b.Int32() + s.CoordinatorEpoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Markers = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrWriteTxnMarkersRequest returns a pointer to a default WriteTxnMarkersRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrWriteTxnMarkersRequest() *WriteTxnMarkersRequest { + var v WriteTxnMarkersRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to WriteTxnMarkersRequest. +func (v *WriteTxnMarkersRequest) Default() { +} + +// NewWriteTxnMarkersRequest returns a default WriteTxnMarkersRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewWriteTxnMarkersRequest() WriteTxnMarkersRequest { + var v WriteTxnMarkersRequest + v.Default() + return v +} + +type WriteTxnMarkersResponseMarkerTopicPartition struct { + // Partition is the partition this result is for. + Partition int32 + + // ErrorCode is non-nil if writing the transansactional marker for this + // partition errored. + // + // CLUSTER_AUTHORIZATION_FAILED is returned if the user does not have + // CLUSTER_ACTION on CLUSTER. + // + // NOT_LEADER_OR_FOLLOWER is returned if the broker receiving this + // request is not the leader of the partition. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the topic or partition is + // not known to exist. + // + // INVALID_PRODUCER_EPOCH is returned if the cluster epoch is provided + // and the provided epoch does not match. + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to WriteTxnMarkersResponseMarkerTopicPartition. +func (v *WriteTxnMarkersResponseMarkerTopicPartition) Default() { +} + +// NewWriteTxnMarkersResponseMarkerTopicPartition returns a default WriteTxnMarkersResponseMarkerTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewWriteTxnMarkersResponseMarkerTopicPartition() WriteTxnMarkersResponseMarkerTopicPartition { + var v WriteTxnMarkersResponseMarkerTopicPartition + v.Default() + return v +} + +type WriteTxnMarkersResponseMarkerTopic struct { + // Topic is the topic these results are for. + Topic string + + // Partitions contains per-partition results for the write markers + // request. + Partitions []WriteTxnMarkersResponseMarkerTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to WriteTxnMarkersResponseMarkerTopic. +func (v *WriteTxnMarkersResponseMarkerTopic) Default() { +} + +// NewWriteTxnMarkersResponseMarkerTopic returns a default WriteTxnMarkersResponseMarkerTopic +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewWriteTxnMarkersResponseMarkerTopic() WriteTxnMarkersResponseMarkerTopic { + var v WriteTxnMarkersResponseMarkerTopic + v.Default() + return v +} + +type WriteTxnMarkersResponseMarker struct { + // ProducerID is the producer ID these results are for (from the input + // request). + ProducerID int64 + + // Topics contains the results for the write markers request. + Topics []WriteTxnMarkersResponseMarkerTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to WriteTxnMarkersResponseMarker. +func (v *WriteTxnMarkersResponseMarker) Default() { +} + +// NewWriteTxnMarkersResponseMarker returns a default WriteTxnMarkersResponseMarker +// This is a shortcut for creating a struct and calling Default yourself. +func NewWriteTxnMarkersResponseMarker() WriteTxnMarkersResponseMarker { + var v WriteTxnMarkersResponseMarker + v.Default() + return v +} + +// WriteTxnMarkersResponse is a response to a WriteTxnMarkersRequest. +type WriteTxnMarkersResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Markers contains results for writing transactional markers. + Markers []WriteTxnMarkersResponseMarker + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +func (*WriteTxnMarkersResponse) Key() int16 { return 27 } +func (*WriteTxnMarkersResponse) MaxVersion() int16 { return 1 } +func (v *WriteTxnMarkersResponse) SetVersion(version int16) { v.Version = version } +func (v *WriteTxnMarkersResponse) GetVersion() int16 { return v.Version } +func (v *WriteTxnMarkersResponse) IsFlexible() bool { return v.Version >= 1 } +func (v *WriteTxnMarkersResponse) RequestKind() Request { + return &WriteTxnMarkersRequest{Version: v.Version} +} + +func (v *WriteTxnMarkersResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + { + v := v.Markers + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *WriteTxnMarkersResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func 
(v *WriteTxnMarkersResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *WriteTxnMarkersResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + s := v + { + v := s.Markers + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]WriteTxnMarkersResponseMarker, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int64() + s.ProducerID = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]WriteTxnMarkersResponseMarkerTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]WriteTxnMarkersResponseMarkerTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Markers = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrWriteTxnMarkersResponse returns a pointer to a default WriteTxnMarkersResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrWriteTxnMarkersResponse() *WriteTxnMarkersResponse { + var v WriteTxnMarkersResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to WriteTxnMarkersResponse. +func (v *WriteTxnMarkersResponse) Default() { +} + +// NewWriteTxnMarkersResponse returns a default WriteTxnMarkersResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewWriteTxnMarkersResponse() WriteTxnMarkersResponse { + var v WriteTxnMarkersResponse + v.Default() + return v +} + +type TxnOffsetCommitRequestTopicPartition struct { + // Partition is a partition to add for a pending commit. + Partition int32 + + // Offset is the offset within partition to commit once EndTxnRequest is + // called (with commit; abort obviously aborts). + Offset int64 + + // LeaderEpoch, proposed in KIP-320 and introduced in Kafka 2.1.0, + // allows brokers to check if the client is fenced (has an out of date + // leader) or is using an unknown leader. + // + // The initial leader epoch can be determined from a MetadataResponse. + // To skip log truncation checking, use -1. + // + // This field has a default of -1. + LeaderEpoch int32 // v2+ + + // Metadata is optional metadata the client wants to include with this + // commit. 
+ Metadata *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to TxnOffsetCommitRequestTopicPartition. +func (v *TxnOffsetCommitRequestTopicPartition) Default() { + v.LeaderEpoch = -1 +} + +// NewTxnOffsetCommitRequestTopicPartition returns a default TxnOffsetCommitRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewTxnOffsetCommitRequestTopicPartition() TxnOffsetCommitRequestTopicPartition { + var v TxnOffsetCommitRequestTopicPartition + v.Default() + return v +} + +type TxnOffsetCommitRequestTopic struct { + // Topic is a topic to add for a pending commit. + Topic string + + // Partitions are partitions to add for pending commits. + Partitions []TxnOffsetCommitRequestTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to TxnOffsetCommitRequestTopic. +func (v *TxnOffsetCommitRequestTopic) Default() { +} + +// NewTxnOffsetCommitRequestTopic returns a default TxnOffsetCommitRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewTxnOffsetCommitRequestTopic() TxnOffsetCommitRequestTopic { + var v TxnOffsetCommitRequestTopic + v.Default() + return v +} + +// TxnOffsetCommitRequest sends offsets that are a part of this transaction +// to be committed once the transaction itself finishes. This effectively +// replaces OffsetCommitRequest for when using transactions. +type TxnOffsetCommitRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // TransactionalID is the transactional ID to use for this request. + TransactionalID string + + // Group is the group consumed in this transaction and to be used for + // committing. + Group string + + // ProducerID is the producer ID of the client for this transactional ID + // as received from InitProducerID. + ProducerID int64 + + // ProducerEpoch is the producer epoch of the client for this transactional ID + // as received from InitProducerID. + ProducerEpoch int16 + + // Generation is the group generation this transactional offset commit request is for. + // + // This field has a default of -1. + Generation int32 // v3+ + + // MemberID is the member ID this member is for. + MemberID string // v3+ + + // InstanceID is the instance ID of this member in the group (KIP-345, KIP-447). + InstanceID *string // v3+ + + // Topics are topics to add for pending commits. + Topics []TxnOffsetCommitRequestTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +func (*TxnOffsetCommitRequest) Key() int16 { return 28 } +func (*TxnOffsetCommitRequest) MaxVersion() int16 { return 3 } +func (v *TxnOffsetCommitRequest) SetVersion(version int16) { v.Version = version } +func (v *TxnOffsetCommitRequest) GetVersion() int16 { return v.Version } +func (v *TxnOffsetCommitRequest) IsFlexible() bool { return v.Version >= 3 } +func (v *TxnOffsetCommitRequest) IsGroupCoordinatorRequest() {} +func (v *TxnOffsetCommitRequest) ResponseKind() Response { + r := &TxnOffsetCommitResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. 
+// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *TxnOffsetCommitRequest) RequestWith(ctx context.Context, r Requestor) (*TxnOffsetCommitResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*TxnOffsetCommitResponse) + return resp, err +} + +func (v *TxnOffsetCommitRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + { + v := v.TransactionalID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ProducerEpoch + dst = kbin.AppendInt16(dst, v) + } + if version >= 3 { + v := v.Generation + dst = kbin.AppendInt32(dst, v) + } + if version >= 3 { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 3 { + v := v.InstanceID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Offset + dst = kbin.AppendInt64(dst, v) + } + if version >= 2 { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Metadata + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *TxnOffsetCommitRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *TxnOffsetCommitRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *TxnOffsetCommitRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.TransactionalID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Group = v + } + { + v := b.Int64() + s.ProducerID = v + } + { + v := b.Int16() + s.ProducerEpoch = v + } + if version >= 3 { 
+ v := b.Int32() + s.Generation = v + } + if version >= 3 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.MemberID = v + } + if version >= 3 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.InstanceID = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]TxnOffsetCommitRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]TxnOffsetCommitRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int64() + s.Offset = v + } + if version >= 2 { + v := b.Int32() + s.LeaderEpoch = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Metadata = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrTxnOffsetCommitRequest returns a pointer to a default TxnOffsetCommitRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrTxnOffsetCommitRequest() *TxnOffsetCommitRequest { + var v TxnOffsetCommitRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to TxnOffsetCommitRequest. +func (v *TxnOffsetCommitRequest) Default() { + v.Generation = -1 +} + +// NewTxnOffsetCommitRequest returns a default TxnOffsetCommitRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewTxnOffsetCommitRequest() TxnOffsetCommitRequest { + var v TxnOffsetCommitRequest + v.Default() + return v +} + +type TxnOffsetCommitResponseTopicPartition struct { + // Partition is the partition this response is for. + Partition int32 + + // ErrorCode is any error for this topic/partition commit. + // + // TRANSACTIONAL_ID_AUTHORIZATION_FAILED is returned if the client is + // not authorized for write with transactional IDs with the requested + // transactional ID. + // + // GROUP_AUTHORIZATION_FAILED is returned if the client is not authorized + // to read group with the requested group id. + // + // TOPIC_AUTHORIZATION_FAILED is returned for all topics that the client + // is not authorized to read. 
+ // + // UNKNOWN_TOPIC_OR_PARTITION is returned for all topics or partitions + // that the broker does not know of. + // + // INVALID_GROUP_ID is returned if the requested group does not exist. + // + // COORDINATOR_NOT_AVAILABLE is returned if the broker is not yet fully + // started or is shutting down, or if the group was just deleted or is + // migrating to another broker. + // + // COORDINATOR_LOAD_IN_PROGRESS is returned if the group is still loading. + // + // NOT_COORDINATOR is returned if the broker is not the coordinator for + // the group. + // + // FENCED_INSTANCE_ID is returned if the member is fenced (another newer + // transactional member is using the same instance ID). + // + // UNKNOWN_MEMBER_ID is returned if the consumer group does not know of + // this member. + // + // ILLEGAL_GENERATION is returned if the consumer group's generation is + // different than the requested generation. + // + // OFFSET_METADATA_TOO_LARGE is returned if the commit metadata is too + // large. + // + // REBALANCE_IN_PROGRESS is returned if the group is completing a rebalance. + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to TxnOffsetCommitResponseTopicPartition. +func (v *TxnOffsetCommitResponseTopicPartition) Default() { +} + +// NewTxnOffsetCommitResponseTopicPartition returns a default TxnOffsetCommitResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewTxnOffsetCommitResponseTopicPartition() TxnOffsetCommitResponseTopicPartition { + var v TxnOffsetCommitResponseTopicPartition + v.Default() + return v +} + +type TxnOffsetCommitResponseTopic struct { + // Topic is the topic this response is for. + Topic string + + // Partitions contains responses to the partitions in this topic. + Partitions []TxnOffsetCommitResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to TxnOffsetCommitResponseTopic. +func (v *TxnOffsetCommitResponseTopic) Default() { +} + +// NewTxnOffsetCommitResponseTopic returns a default TxnOffsetCommitResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewTxnOffsetCommitResponseTopic() TxnOffsetCommitResponseTopic { + var v TxnOffsetCommitResponseTopic + v.Default() + return v +} + +// TxnOffsetCommitResponse is a response to a TxnOffsetCommitRequest. +type TxnOffsetCommitResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // Topics contains responses to the topics in the request. + Topics []TxnOffsetCommitResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v3+ +} + +func (*TxnOffsetCommitResponse) Key() int16 { return 28 } +func (*TxnOffsetCommitResponse) MaxVersion() int16 { return 3 } +func (v *TxnOffsetCommitResponse) SetVersion(version int16) { v.Version = version } +func (v *TxnOffsetCommitResponse) GetVersion() int16 { return v.Version } +func (v *TxnOffsetCommitResponse) IsFlexible() bool { return v.Version >= 3 } +func (v *TxnOffsetCommitResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 1 } +func (v *TxnOffsetCommitResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *TxnOffsetCommitResponse) RequestKind() Request { + return &TxnOffsetCommitRequest{Version: v.Version} +} + +func (v *TxnOffsetCommitResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *TxnOffsetCommitResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *TxnOffsetCommitResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *TxnOffsetCommitResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]TxnOffsetCommitResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]TxnOffsetCommitResponseTopicPartition, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrTxnOffsetCommitResponse returns a pointer to a default TxnOffsetCommitResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrTxnOffsetCommitResponse() *TxnOffsetCommitResponse { + var v TxnOffsetCommitResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to TxnOffsetCommitResponse. +func (v *TxnOffsetCommitResponse) Default() { +} + +// NewTxnOffsetCommitResponse returns a default TxnOffsetCommitResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewTxnOffsetCommitResponse() TxnOffsetCommitResponse { + var v TxnOffsetCommitResponse + v.Default() + return v +} + +// DescribeACLsRequest describes ACLs. Describing ACLs works on a filter basis: +// anything that matches the filter is described. Note that there are two +// "types" of filters in this request: the resource filter and the entry +// filter, with entries corresponding to users. The first three fields form the +// resource filter, the last four the entry filter. +type DescribeACLsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ResourceType is the type of resource to describe. + ResourceType ACLResourceType + + // ResourceName is the name to filter out. For the CLUSTER resource type, + // this must be "kafka-cluster". + ResourceName *string + + // ResourcePatternType is how ResourceName is understood. + // + // This field has a default of 3. + ResourcePatternType ACLResourcePatternType // v1+ + + // Principal is the user to filter for. In Kafka with the simple authorizor, + // all principals begin with "User:". Pluggable authorizors are allowed, but + // Kafka still expects principals to lead with a principal type ("User") and + // have a colon separating the principal name ("bob" in "User:bob"). + Principal *string + + // Host is a host to filter for. + Host *string + + // Operation is an operation to filter for. + // + // Note that READ, WRITE, DELETE, and ALTER imply DESCRIBE, and ALTER_CONFIGS + // implies DESCRIBE_CONFIGS. + Operation ACLOperation + + // PermissionType is the permission type to filter for. UNKNOWN is 0. + PermissionType ACLPermissionType + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*DescribeACLsRequest) Key() int16 { return 29 } +func (*DescribeACLsRequest) MaxVersion() int16 { return 3 } +func (v *DescribeACLsRequest) SetVersion(version int16) { v.Version = version } +func (v *DescribeACLsRequest) GetVersion() int16 { return v.Version } +func (v *DescribeACLsRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *DescribeACLsRequest) ResponseKind() Response { + r := &DescribeACLsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. 
+// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *DescribeACLsRequest) RequestWith(ctx context.Context, r Requestor) (*DescribeACLsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DescribeACLsResponse) + return resp, err +} + +func (v *DescribeACLsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ResourceType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.ResourceName + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 1 { + v := v.ResourcePatternType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.Principal + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Host + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Operation + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.PermissionType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeACLsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeACLsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeACLsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + var t ACLResourceType + { + v := b.Int8() + t = ACLResourceType(v) + } + v := t + s.ResourceType = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ResourceName = v + } + if version >= 1 { + var t ACLResourcePatternType + { + v := b.Int8() + t = ACLResourcePatternType(v) + } + v := t + s.ResourcePatternType = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Principal = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Host = v + } + { + var t ACLOperation + { + v := b.Int8() + t = ACLOperation(v) + } + v := t + s.Operation = v + } + { + var t ACLPermissionType + { + v := b.Int8() + t = ACLPermissionType(v) + } + v := t + s.PermissionType = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeACLsRequest returns a pointer to a default DescribeACLsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeACLsRequest() *DescribeACLsRequest { + var v DescribeACLsRequest + v.Default() + return &v +} + +// Default sets any default fields. 
Calling this allows for future compatibility +// if new fields are added to DescribeACLsRequest. +func (v *DescribeACLsRequest) Default() { + v.ResourcePatternType = 3 +} + +// NewDescribeACLsRequest returns a default DescribeACLsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeACLsRequest() DescribeACLsRequest { + var v DescribeACLsRequest + v.Default() + return v +} + +type DescribeACLsResponseResourceACL struct { + // Principal is who this ACL applies to. + Principal string + + // Host is on which host this ACL applies. + Host string + + // Operation is the operation being described. + Operation ACLOperation + + // PermissionType is the permission being described. + PermissionType ACLPermissionType + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeACLsResponseResourceACL. +func (v *DescribeACLsResponseResourceACL) Default() { +} + +// NewDescribeACLsResponseResourceACL returns a default DescribeACLsResponseResourceACL +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeACLsResponseResourceACL() DescribeACLsResponseResourceACL { + var v DescribeACLsResponseResourceACL + v.Default() + return v +} + +type DescribeACLsResponseResource struct { + // ResourceType is the resource type being described. + ResourceType ACLResourceType + + // ResourceName is the resource name being described. + ResourceName string + + // ResourcePatternType is the pattern type being described. + // + // This field has a default of 3. + ResourcePatternType ACLResourcePatternType // v1+ + + // ACLs contains users / entries being described. + ACLs []DescribeACLsResponseResourceACL + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeACLsResponseResource. +func (v *DescribeACLsResponseResource) Default() { + v.ResourcePatternType = 3 +} + +// NewDescribeACLsResponseResource returns a default DescribeACLsResponseResource +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeACLsResponseResource() DescribeACLsResponseResource { + var v DescribeACLsResponseResource + v.Default() + return v +} + +// DescribeACLsResponse is a response to a describe acls request. +type DescribeACLsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // ErrorCode is the error code returned on request failure. + // + // SECURITY_DISABLED is returned if there is no authorizer configured on the + // broker. + // + // There can be other authorization failures. + ErrorCode int16 + + // ErrorMessage is a message for an error. + ErrorMessage *string + + // Resources are the describe resources. + Resources []DescribeACLsResponseResource + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v2+ +} + +func (*DescribeACLsResponse) Key() int16 { return 29 } +func (*DescribeACLsResponse) MaxVersion() int16 { return 3 } +func (v *DescribeACLsResponse) SetVersion(version int16) { v.Version = version } +func (v *DescribeACLsResponse) GetVersion() int16 { return v.Version } +func (v *DescribeACLsResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *DescribeACLsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 1 } +func (v *DescribeACLsResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *DescribeACLsResponse) RequestKind() Request { return &DescribeACLsRequest{Version: v.Version} } + +func (v *DescribeACLsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Resources + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ResourceType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.ResourceName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 1 { + v := v.ResourcePatternType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.ACLs + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Principal + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Host + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Operation + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.PermissionType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeACLsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeACLsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeACLsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + v := s.Resources + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return 
b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeACLsResponseResource, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var t ACLResourceType + { + v := b.Int8() + t = ACLResourceType(v) + } + v := t + s.ResourceType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ResourceName = v + } + if version >= 1 { + var t ACLResourcePatternType + { + v := b.Int8() + t = ACLResourcePatternType(v) + } + v := t + s.ResourcePatternType = v + } + { + v := s.ACLs + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeACLsResponseResourceACL, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Principal = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Host = v + } + { + var t ACLOperation + { + v := b.Int8() + t = ACLOperation(v) + } + v := t + s.Operation = v + } + { + var t ACLPermissionType + { + v := b.Int8() + t = ACLPermissionType(v) + } + v := t + s.PermissionType = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.ACLs = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Resources = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeACLsResponse returns a pointer to a default DescribeACLsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeACLsResponse() *DescribeACLsResponse { + var v DescribeACLsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeACLsResponse. +func (v *DescribeACLsResponse) Default() { +} + +// NewDescribeACLsResponse returns a default DescribeACLsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeACLsResponse() DescribeACLsResponse { + var v DescribeACLsResponse + v.Default() + return v +} + +type CreateACLsRequestCreation struct { + // ResourceType is the type of resource this acl entry will be on. + // It is invalid to use UNKNOWN or ANY. + ResourceType ACLResourceType + + // ResourceName is the name of the resource this acl entry will be on. + // For CLUSTER, this must be "kafka-cluster". + ResourceName string + + // ResourcePatternType is the pattern type to use for the resource name. + // This cannot be UNKNOWN or MATCH (i.e. this must be LITERAL or PREFIXED). + // The default for pre-Kafka 2.0.0 is effectively LITERAL. + // + // This field has a default of 3. + ResourcePatternType ACLResourcePatternType // v1+ + + // Principal is the user to apply this acl for. With the Kafka simple + // authorizer, this must begin with "User:". + Principal string + + // Host is the host address to use for this acl. 
Each host to allow + // the principal access from must be specified as a new creation. KIP-252 + // might solve this someday. The special wildcard host "*" allows all hosts. + Host string + + // Operation is the operation this acl is for. This must not be UNKNOWN or + // ANY. + Operation ACLOperation + + // PermissionType is the permission of this acl. This must be either ALLOW + // or DENY. + PermissionType ACLPermissionType + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreateACLsRequestCreation. +func (v *CreateACLsRequestCreation) Default() { + v.ResourcePatternType = 3 +} + +// NewCreateACLsRequestCreation returns a default CreateACLsRequestCreation +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreateACLsRequestCreation() CreateACLsRequestCreation { + var v CreateACLsRequestCreation + v.Default() + return v +} + +// CreateACLsRequest creates acls. Creating acls can be done as a batch; each +// "creation" will be an acl entry. +// +// See the DescribeACLsRequest documentation for more descriptions of what +// valid values for the fields in this request are. +type CreateACLsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + Creations []CreateACLsRequestCreation + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*CreateACLsRequest) Key() int16 { return 30 } +func (*CreateACLsRequest) MaxVersion() int16 { return 3 } +func (v *CreateACLsRequest) SetVersion(version int16) { v.Version = version } +func (v *CreateACLsRequest) GetVersion() int16 { return v.Version } +func (v *CreateACLsRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *CreateACLsRequest) ResponseKind() Response { + r := &CreateACLsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
+func (v *CreateACLsRequest) RequestWith(ctx context.Context, r Requestor) (*CreateACLsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*CreateACLsResponse) + return resp, err +} + +func (v *CreateACLsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.Creations + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ResourceType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.ResourceName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 1 { + v := v.ResourcePatternType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.Principal + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Host + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Operation + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.PermissionType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *CreateACLsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *CreateACLsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *CreateACLsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := s.Creations + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]CreateACLsRequestCreation, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var t ACLResourceType + { + v := b.Int8() + t = ACLResourceType(v) + } + v := t + s.ResourceType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ResourceName = v + } + if version >= 1 { + var t ACLResourcePatternType + { + v := b.Int8() + t = ACLResourcePatternType(v) + } + v := t + s.ResourcePatternType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Principal = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Host = v + } + { + var t ACLOperation + { + v := b.Int8() + t = ACLOperation(v) + } + v := t + s.Operation = v + } + { + var t ACLPermissionType + { + v := b.Int8() + t = ACLPermissionType(v) + } + v := t + s.PermissionType = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Creations = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrCreateACLsRequest returns a pointer to a default CreateACLsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrCreateACLsRequest() *CreateACLsRequest { + var v CreateACLsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreateACLsRequest. +func (v *CreateACLsRequest) Default() { +} + +// NewCreateACLsRequest returns a default CreateACLsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreateACLsRequest() CreateACLsRequest { + var v CreateACLsRequest + v.Default() + return v +} + +type CreateACLsResponseResult struct { + // ErrorCode is an error for this particular creation (index wise). + ErrorCode int16 + + // ErrorMessage is a message for this error. + ErrorMessage *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreateACLsResponseResult. +func (v *CreateACLsResponseResult) Default() { +} + +// NewCreateACLsResponseResult returns a default CreateACLsResponseResult +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreateACLsResponseResult() CreateACLsResponseResult { + var v CreateACLsResponseResult + v.Default() + return v +} + +// CreateACLsResponse is a response for a CreateACLsRequest. +type CreateACLsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // Results contains responses to each creation request. 
+ Results []CreateACLsResponseResult + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*CreateACLsResponse) Key() int16 { return 30 } +func (*CreateACLsResponse) MaxVersion() int16 { return 3 } +func (v *CreateACLsResponse) SetVersion(version int16) { v.Version = version } +func (v *CreateACLsResponse) GetVersion() int16 { return v.Version } +func (v *CreateACLsResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *CreateACLsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 1 } +func (v *CreateACLsResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *CreateACLsResponse) RequestKind() Request { return &CreateACLsRequest{Version: v.Version} } + +func (v *CreateACLsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Results + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *CreateACLsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *CreateACLsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *CreateACLsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Results + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]CreateACLsResponseResult, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Results = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrCreateACLsResponse returns a pointer to a default CreateACLsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrCreateACLsResponse() *CreateACLsResponse { + var v CreateACLsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreateACLsResponse. +func (v *CreateACLsResponse) Default() { +} + +// NewCreateACLsResponse returns a default CreateACLsResponse +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewCreateACLsResponse() CreateACLsResponse { + var v CreateACLsResponse + v.Default() + return v +} + +type DeleteACLsRequestFilter struct { + ResourceType ACLResourceType + + ResourceName *string + + // This field has a default of 3. + ResourcePatternType ACLResourcePatternType // v1+ + + Principal *string + + Host *string + + Operation ACLOperation + + PermissionType ACLPermissionType + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteACLsRequestFilter. +func (v *DeleteACLsRequestFilter) Default() { + v.ResourcePatternType = 3 +} + +// NewDeleteACLsRequestFilter returns a default DeleteACLsRequestFilter +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteACLsRequestFilter() DeleteACLsRequestFilter { + var v DeleteACLsRequestFilter + v.Default() + return v +} + +// DeleteACLsRequest deletes acls. This request works on filters the same way +// that DescribeACLsRequest does. See DescribeACLsRequest for documentation of +// the fields. +type DeleteACLsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Filters are filters for acls to delete. + Filters []DeleteACLsRequestFilter + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*DeleteACLsRequest) Key() int16 { return 31 } +func (*DeleteACLsRequest) MaxVersion() int16 { return 3 } +func (v *DeleteACLsRequest) SetVersion(version int16) { v.Version = version } +func (v *DeleteACLsRequest) GetVersion() int16 { return v.Version } +func (v *DeleteACLsRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *DeleteACLsRequest) ResponseKind() Response { + r := &DeleteACLsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
+func (v *DeleteACLsRequest) RequestWith(ctx context.Context, r Requestor) (*DeleteACLsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DeleteACLsResponse) + return resp, err +} + +func (v *DeleteACLsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.Filters + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ResourceType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.ResourceName + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 1 { + v := v.ResourcePatternType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.Principal + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Host + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Operation + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.PermissionType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DeleteACLsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DeleteACLsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DeleteACLsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := s.Filters + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DeleteACLsRequestFilter, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var t ACLResourceType + { + v := b.Int8() + t = ACLResourceType(v) + } + v := t + s.ResourceType = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ResourceName = v + } + if version >= 1 { + var t ACLResourcePatternType + { + v := b.Int8() + t = ACLResourcePatternType(v) + } + v := t + s.ResourcePatternType = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Principal = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Host = v + } + { + var t ACLOperation + { + v := b.Int8() + t = ACLOperation(v) + } + v := t + s.Operation = v + } + { + var t ACLPermissionType + { + v := b.Int8() + t = ACLPermissionType(v) + } + v := t + s.PermissionType = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Filters = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDeleteACLsRequest returns a pointer to a default DeleteACLsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDeleteACLsRequest() *DeleteACLsRequest { + var v DeleteACLsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteACLsRequest. +func (v *DeleteACLsRequest) Default() { +} + +// NewDeleteACLsRequest returns a default DeleteACLsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteACLsRequest() DeleteACLsRequest { + var v DeleteACLsRequest + v.Default() + return v +} + +type DeleteACLsResponseResultMatchingACL struct { + // ErrorCode contains an error for this individual acl for this filter. + ErrorCode int16 + + // ErrorMessage is a message for this error. + ErrorMessage *string + + ResourceType ACLResourceType + + ResourceName string + + // This field has a default of 3. + ResourcePatternType ACLResourcePatternType // v1+ + + Principal string + + Host string + + Operation ACLOperation + + PermissionType ACLPermissionType + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteACLsResponseResultMatchingACL. +func (v *DeleteACLsResponseResultMatchingACL) Default() { + v.ResourcePatternType = 3 +} + +// NewDeleteACLsResponseResultMatchingACL returns a default DeleteACLsResponseResultMatchingACL +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteACLsResponseResultMatchingACL() DeleteACLsResponseResultMatchingACL { + var v DeleteACLsResponseResultMatchingACL + v.Default() + return v +} + +type DeleteACLsResponseResult struct { + // ErrorCode is the overall error code for this individual filter. + ErrorCode int16 + + // ErrorMessage is a message for this error. 
+ ErrorMessage *string + + // MatchingACLs contains all acls that were matched for this filter. + MatchingACLs []DeleteACLsResponseResultMatchingACL + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteACLsResponseResult. +func (v *DeleteACLsResponseResult) Default() { +} + +// NewDeleteACLsResponseResult returns a default DeleteACLsResponseResult +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteACLsResponseResult() DeleteACLsResponseResult { + var v DeleteACLsResponseResult + v.Default() + return v +} + +// DeleteACLsResponse is a response for a DeleteACLsRequest. +type DeleteACLsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // Results contains a response to each requested filter. + Results []DeleteACLsResponseResult + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*DeleteACLsResponse) Key() int16 { return 31 } +func (*DeleteACLsResponse) MaxVersion() int16 { return 3 } +func (v *DeleteACLsResponse) SetVersion(version int16) { v.Version = version } +func (v *DeleteACLsResponse) GetVersion() int16 { return v.Version } +func (v *DeleteACLsResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *DeleteACLsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 1 } +func (v *DeleteACLsResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *DeleteACLsResponse) RequestKind() Request { return &DeleteACLsRequest{Version: v.Version} } + +func (v *DeleteACLsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Results + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.MatchingACLs + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.ResourceType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.ResourceName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 1 { + v := v.ResourcePatternType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.Principal + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Host 
+ if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Operation + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.PermissionType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DeleteACLsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DeleteACLsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DeleteACLsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Results + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DeleteACLsResponseResult, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + v := s.MatchingACLs + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DeleteACLsResponseResultMatchingACL, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + var t ACLResourceType + { + v := b.Int8() + t = ACLResourceType(v) + } + v := t + s.ResourceType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ResourceName = v + } + if version >= 1 { + var t ACLResourcePatternType + { + v := b.Int8() + t = ACLResourcePatternType(v) + } + v := t + s.ResourcePatternType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Principal = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Host = v + } + { + var t ACLOperation + { + v := b.Int8() + t = ACLOperation(v) + } + v := t + s.Operation = v + } + { + var t ACLPermissionType + { + v := b.Int8() + t = ACLPermissionType(v) + } + v := t + s.PermissionType = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.MatchingACLs = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Results = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDeleteACLsResponse returns a pointer to a default DeleteACLsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDeleteACLsResponse() *DeleteACLsResponse { + var v DeleteACLsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteACLsResponse. +func (v *DeleteACLsResponse) Default() { +} + +// NewDeleteACLsResponse returns a default DeleteACLsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteACLsResponse() DeleteACLsResponse { + var v DeleteACLsResponse + v.Default() + return v +} + +type DescribeConfigsRequestResource struct { + // ResourceType is an enum corresponding to the type of config to describe. + ResourceType ConfigResourceType + + // ResourceName is the name of config to describe. + // + // If the requested type is a topic, this corresponds to a topic name. + // + // If the requested type if a broker, this should either be empty or be + // the ID of the broker this request is issued to. If it is empty, this + // returns all broker configs, but only the dynamic configuration values. + // If a specific ID, this returns all broker config values. + ResourceName string + + // ConfigNames is a list of config entries to return. Null requests all. + ConfigNames []string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeConfigsRequestResource. 
+func (v *DescribeConfigsRequestResource) Default() { +} + +// NewDescribeConfigsRequestResource returns a default DescribeConfigsRequestResource +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeConfigsRequestResource() DescribeConfigsRequestResource { + var v DescribeConfigsRequestResource + v.Default() + return v +} + +// DescribeConfigsRequest issues a request to describe configs that Kafka +// currently has. These are the key/value pairs that one uses to configure +// brokers and topics. +type DescribeConfigsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Resources is a list of resources to describe. + Resources []DescribeConfigsRequestResource + + // IncludeSynonyms signifies whether to return config entry synonyms for + // all config entries. + IncludeSynonyms bool // v1+ + + // IncludeDocumentation signifies whether to return documentation for + // config entries. + IncludeDocumentation bool // v3+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +func (*DescribeConfigsRequest) Key() int16 { return 32 } +func (*DescribeConfigsRequest) MaxVersion() int16 { return 4 } +func (v *DescribeConfigsRequest) SetVersion(version int16) { v.Version = version } +func (v *DescribeConfigsRequest) GetVersion() int16 { return v.Version } +func (v *DescribeConfigsRequest) IsFlexible() bool { return v.Version >= 4 } +func (v *DescribeConfigsRequest) ResponseKind() Response { + r := &DescribeConfigsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
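As an illustrative sketch of wiring up the DescribeConfigs request above: the topic name and config keys are placeholders, and resource type 2 for topics follows the Kafka config-resource enum (the same 2 = topic / 4 = broker convention noted for AlterConfigs further down):

package main

import (
	"fmt"

	"github.com/twmb/franz-go/pkg/kmsg"
)

func main() {
	req := kmsg.NewPtrDescribeConfigsRequest()
	req.SetVersion(4) // MaxVersion above; v4+ uses the flexible encoding

	res := kmsg.NewDescribeConfigsRequestResource()
	res.ResourceType = 2        // topic (2 = topic, 4 = broker)
	res.ResourceName = "orders" // placeholder topic
	res.ConfigNames = []string{"retention.ms", "segment.bytes"} // nil would request every entry

	req.Resources = append(req.Resources, res)
	req.IncludeSynonyms = true      // v1+: also return fallback values
	req.IncludeDocumentation = true // v3+: also return per-entry documentation

	wire := req.AppendTo(nil)
	fmt.Printf("DescribeConfigs v%d body: %d bytes\n", req.GetVersion(), len(wire))
}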
+func (v *DescribeConfigsRequest) RequestWith(ctx context.Context, r Requestor) (*DescribeConfigsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DescribeConfigsResponse) + return resp, err +} + +func (v *DescribeConfigsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + { + v := v.Resources + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ResourceType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.ResourceName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ConfigNames + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + for i := range v { + v := v[i] + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 1 { + v := v.IncludeSynonyms + dst = kbin.AppendBool(dst, v) + } + if version >= 3 { + v := v.IncludeDocumentation + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeConfigsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeConfigsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeConfigsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + s := v + { + v := s.Resources + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeConfigsRequestResource, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var t ConfigResourceType + { + v := b.Int8() + t = ConfigResourceType(v) + } + v := t + s.ResourceType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ResourceName = v + } + { + v := s.ConfigNames + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 0 || l == 0 { + a = []string{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]string, l)...) 
+ } + for i := int32(0); i < l; i++ { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + a[i] = v + } + v = a + s.ConfigNames = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Resources = v + } + if version >= 1 { + v := b.Bool() + s.IncludeSynonyms = v + } + if version >= 3 { + v := b.Bool() + s.IncludeDocumentation = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeConfigsRequest returns a pointer to a default DescribeConfigsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeConfigsRequest() *DescribeConfigsRequest { + var v DescribeConfigsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeConfigsRequest. +func (v *DescribeConfigsRequest) Default() { +} + +// NewDescribeConfigsRequest returns a default DescribeConfigsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeConfigsRequest() DescribeConfigsRequest { + var v DescribeConfigsRequest + v.Default() + return v +} + +type DescribeConfigsResponseResourceConfigConfigSynonym struct { + Name string + + Value *string + + Source ConfigSource + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeConfigsResponseResourceConfigConfigSynonym. +func (v *DescribeConfigsResponseResourceConfigConfigSynonym) Default() { +} + +// NewDescribeConfigsResponseResourceConfigConfigSynonym returns a default DescribeConfigsResponseResourceConfigConfigSynonym +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeConfigsResponseResourceConfigConfigSynonym() DescribeConfigsResponseResourceConfigConfigSynonym { + var v DescribeConfigsResponseResourceConfigConfigSynonym + v.Default() + return v +} + +type DescribeConfigsResponseResourceConfig struct { + // Name is a key this entry corresponds to (e.g. segment.bytes). + Name string + + // Value is the value for this config key. If the key is sensitive, + // the value will be null. + Value *string + + // ReadOnly signifies whether this is not a dynamic config option. + // + // Note that this field is not always correct, and you may need to check + // whether the Source is any dynamic enum. See franz-go#91 for more details. + ReadOnly bool + + // IsDefault is whether this is a default config option. This has been + // replaced in favor of Source. + IsDefault bool + + // Source is where this config entry is from. + // + // This field has a default of -1. + Source ConfigSource // v1+ + + // IsSensitive signifies whether this is a sensitive config key, which + // is either a password or an unknown type. + IsSensitive bool + + // ConfigSynonyms contains fallback key/value pairs for this config + // entry, in order of preference. That is, if a config entry is both + // dynamically configured and has a default, the top level return will be + // the dynamic configuration, while its "synonym" will be the default. + ConfigSynonyms []DescribeConfigsResponseResourceConfigConfigSynonym // v1+ + + // ConfigType specifies the configuration data type. 
+ ConfigType ConfigType // v3+ + + // Documentation is optional documentation for the config entry. + Documentation *string // v3+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeConfigsResponseResourceConfig. +func (v *DescribeConfigsResponseResourceConfig) Default() { + v.Source = -1 +} + +// NewDescribeConfigsResponseResourceConfig returns a default DescribeConfigsResponseResourceConfig +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeConfigsResponseResourceConfig() DescribeConfigsResponseResourceConfig { + var v DescribeConfigsResponseResourceConfig + v.Default() + return v +} + +type DescribeConfigsResponseResource struct { + // ErrorCode is the error code returned for describing configs. + // + // INVALID_REQUEST is returned if asking to descibe an invalid resource + // type. + // + // CLUSTER_AUTHORIZATION_FAILED is returned if asking to describe broker + // configs but the client is not authorized to do so. + // + // TOPIC_AUTHORIZATION_FAILED is returned if asking to describe topic + // configs but the client is not authorized to do so. + // + // INVALID_TOPIC_EXCEPTION is returned if the requested topic was invalid. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the broker does not know of + // the requested topic. + ErrorCode int16 + + // ErrorMessage is an informative message if the describe config failed. + ErrorMessage *string + + // ResourceType is the enum corresponding to the type of described config. + ResourceType ConfigResourceType + + // ResourceName is the name corresponding to the describe config request. + ResourceName string + + // Configs contains information about key/value config pairs for + // the requested resource. + Configs []DescribeConfigsResponseResourceConfig + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeConfigsResponseResource. +func (v *DescribeConfigsResponseResource) Default() { +} + +// NewDescribeConfigsResponseResource returns a default DescribeConfigsResponseResource +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeConfigsResponseResource() DescribeConfigsResponseResource { + var v DescribeConfigsResponseResource + v.Default() + return v +} + +// DescribeConfigsResponse is returned from a DescribeConfigsRequest. +type DescribeConfigsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 2. + ThrottleMillis int32 + + // Resources are responses for each resource in the describe config request. + Resources []DescribeConfigsResponseResource + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v4+ +} + +func (*DescribeConfigsResponse) Key() int16 { return 32 } +func (*DescribeConfigsResponse) MaxVersion() int16 { return 4 } +func (v *DescribeConfigsResponse) SetVersion(version int16) { v.Version = version } +func (v *DescribeConfigsResponse) GetVersion() int16 { return v.Version } +func (v *DescribeConfigsResponse) IsFlexible() bool { return v.Version >= 4 } +func (v *DescribeConfigsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 2 } +func (v *DescribeConfigsResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *DescribeConfigsResponse) RequestKind() Request { + return &DescribeConfigsRequest{Version: v.Version} +} + +func (v *DescribeConfigsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Resources + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.ResourceType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.ResourceName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Configs + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Value + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.ReadOnly + dst = kbin.AppendBool(dst, v) + } + if version >= 0 && version <= 0 { + v := v.IsDefault + dst = kbin.AppendBool(dst, v) + } + if version >= 1 { + v := v.Source + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.IsSensitive + dst = kbin.AppendBool(dst, v) + } + if version >= 1 { + v := v.ConfigSynonyms + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Value + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Source + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 3 { + v := v.ConfigType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + if version >= 3 { + v := v.Documentation + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + 
dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeConfigsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeConfigsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeConfigsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Resources + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeConfigsResponseResource, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + var t ConfigResourceType + { + v := b.Int8() + t = ConfigResourceType(v) + } + v := t + s.ResourceType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ResourceName = v + } + { + v := s.Configs + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeConfigsResponseResourceConfig, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Value = v + } + { + v := b.Bool() + s.ReadOnly = v + } + if version >= 0 && version <= 0 { + v := b.Bool() + s.IsDefault = v + } + if version >= 1 { + var t ConfigSource + { + v := b.Int8() + t = ConfigSource(v) + } + v := t + s.Source = v + } + { + v := b.Bool() + s.IsSensitive = v + } + if version >= 1 { + v := s.ConfigSynonyms + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeConfigsResponseResourceConfigConfigSynonym, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Value = v + } + { + var t ConfigSource + { + v := b.Int8() + t = ConfigSource(v) + } + v := t + s.Source = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.ConfigSynonyms = v + } + if version >= 3 { + var t ConfigType + { + v := b.Int8() + t = ConfigType(v) + } + v := t + s.ConfigType = v + } + if version >= 3 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Documentation = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Configs = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Resources = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeConfigsResponse returns a pointer to a default DescribeConfigsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeConfigsResponse() *DescribeConfigsResponse { + var v DescribeConfigsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeConfigsResponse. +func (v *DescribeConfigsResponse) Default() { +} + +// NewDescribeConfigsResponse returns a default DescribeConfigsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeConfigsResponse() DescribeConfigsResponse { + var v DescribeConfigsResponse + v.Default() + return v +} + +type AlterConfigsRequestResourceConfig struct { + // Name is a key to set (e.g. segment.bytes). + Name string + + // Value is a value to set for the key (e.g. 10). + Value *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterConfigsRequestResourceConfig. +func (v *AlterConfigsRequestResourceConfig) Default() { +} + +// NewAlterConfigsRequestResourceConfig returns a default AlterConfigsRequestResourceConfig +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterConfigsRequestResourceConfig() AlterConfigsRequestResourceConfig { + var v AlterConfigsRequestResourceConfig + v.Default() + return v +} + +type AlterConfigsRequestResource struct { + // ResourceType is an enum corresponding to the type of config to alter. + // The only two valid values are 2 (for topic) and 4 (for broker). + ResourceType ConfigResourceType + + // ResourceName is the name of config to alter. + // + // If the requested type is a topic, this corresponds to a topic name. + // + // If the requested type if a broker, this should either be empty or be + // the ID of the broker this request is issued to. If it is empty, this + // updates all broker configs. 
If a specific ID, this updates just the + // broker. Using a specific ID also ensures that brokers reload config + // or secret files even if the file path has not changed. Lastly, password + // config options can only be defined on a per broker basis. + // + // If the type is broker logger, this must be a broker ID. + ResourceName string + + // Configs contains key/value config pairs to set on the resource. + Configs []AlterConfigsRequestResourceConfig + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterConfigsRequestResource. +func (v *AlterConfigsRequestResource) Default() { +} + +// NewAlterConfigsRequestResource returns a default AlterConfigsRequestResource +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterConfigsRequestResource() AlterConfigsRequestResource { + var v AlterConfigsRequestResource + v.Default() + return v +} + +// AlterConfigsRequest issues a request to alter either topic or broker +// configs. +// +// Note that to alter configs, you must specify the whole config on every +// request. All existing non-static values will be removed. This means that +// to add one key/value to a config, you must describe the config and then +// issue an alter request with the current config with the new key value. +// This also means that dynamic sensitive values, which are not returned +// in describe configs, will be lost. +// +// To fix this problem, the AlterConfigs request / response was deprecated +// in Kafka 2.3.0 in favor of the new IncrementalAlterConfigs request / response. +// See KIP-339 for more details. +type AlterConfigsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Resources is an array of configs to alter. + Resources []AlterConfigsRequestResource + + // ValidateOnly validates the request but does not apply it. + ValidateOnly bool + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*AlterConfigsRequest) Key() int16 { return 33 } +func (*AlterConfigsRequest) MaxVersion() int16 { return 2 } +func (v *AlterConfigsRequest) SetVersion(version int16) { v.Version = version } +func (v *AlterConfigsRequest) GetVersion() int16 { return v.Version } +func (v *AlterConfigsRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *AlterConfigsRequest) ResponseKind() Response { + r := &AlterConfigsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
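A sketch of the AlterConfigs caveat described above: because the request replaces the full dynamic config for the resource, every key you want to keep must be restated. The resource name and values here are placeholders; in practice they would come from a prior DescribeConfigs round trip:

package main

import "github.com/twmb/franz-go/pkg/kmsg"

func buildAlter() []byte {
	retention := "604800000" // placeholder values restated from a prior describe
	cleanup := "delete"

	res := kmsg.NewAlterConfigsRequestResource()
	res.ResourceType = 2 // topic (2 = topic, 4 = broker, per the comment above)
	res.ResourceName = "orders"
	res.Configs = append(res.Configs,
		kmsg.AlterConfigsRequestResourceConfig{Name: "retention.ms", Value: &retention},
		kmsg.AlterConfigsRequestResourceConfig{Name: "cleanup.policy", Value: &cleanup},
	)

	req := kmsg.NewPtrAlterConfigsRequest()
	req.SetVersion(1)
	req.Resources = append(req.Resources, res)
	req.ValidateOnly = true // dry-run: validate the change without applying it

	return req.AppendTo(nil)
}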
+func (v *AlterConfigsRequest) RequestWith(ctx context.Context, r Requestor) (*AlterConfigsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*AlterConfigsResponse) + return resp, err +} + +func (v *AlterConfigsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.Resources + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ResourceType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.ResourceName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Configs + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Value + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.ValidateOnly + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AlterConfigsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AlterConfigsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AlterConfigsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := s.Resources + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterConfigsRequestResource, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var t ConfigResourceType + { + v := b.Int8() + t = ConfigResourceType(v) + } + v := t + s.ResourceType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ResourceName = v + } + { + v := s.Configs + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterConfigsRequestResourceConfig, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Value = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Configs = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Resources = v + } + { + v := b.Bool() + s.ValidateOnly = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAlterConfigsRequest returns a pointer to a default AlterConfigsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAlterConfigsRequest() *AlterConfigsRequest { + var v AlterConfigsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterConfigsRequest. +func (v *AlterConfigsRequest) Default() { +} + +// NewAlterConfigsRequest returns a default AlterConfigsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterConfigsRequest() AlterConfigsRequest { + var v AlterConfigsRequest + v.Default() + return v +} + +type AlterConfigsResponseResource struct { + // ErrorCode is the error code returned for altering configs. + // + // CLUSTER_AUTHORIZATION_FAILED is returned if asking to alter broker + // configs but the client is not authorized to do so. + // + // TOPIC_AUTHORIZATION_FAILED is returned if asking to alter topic + // configs but the client is not authorized to do so. + // + // INVALID_TOPIC_EXCEPTION is returned if the requested topic was invalid. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the broker does not know of + // the requested topic. + // + // INVALID_REQUEST is returned if the requested config is invalid or if + // asking Kafka to alter an invalid resource. + ErrorCode int16 + + // ErrorMessage is an informative message if the alter config failed. + ErrorMessage *string + + // ResourceType is the enum corresponding to the type of altered config. + ResourceType ConfigResourceType + + // ResourceName is the name corresponding to the alter config request. + ResourceName string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterConfigsResponseResource. +func (v *AlterConfigsResponseResource) Default() { +} + +// NewAlterConfigsResponseResource returns a default AlterConfigsResponseResource +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterConfigsResponseResource() AlterConfigsResponseResource { + var v AlterConfigsResponseResource + v.Default() + return v +} + +// AlterConfigsResponse is returned from an AlterConfigsRequest. +type AlterConfigsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. 
+ // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // Resources are responses for each resource in the alter request. + Resources []AlterConfigsResponseResource + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*AlterConfigsResponse) Key() int16 { return 33 } +func (*AlterConfigsResponse) MaxVersion() int16 { return 2 } +func (v *AlterConfigsResponse) SetVersion(version int16) { v.Version = version } +func (v *AlterConfigsResponse) GetVersion() int16 { return v.Version } +func (v *AlterConfigsResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *AlterConfigsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 1 } +func (v *AlterConfigsResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *AlterConfigsResponse) RequestKind() Request { return &AlterConfigsRequest{Version: v.Version} } + +func (v *AlterConfigsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Resources + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.ResourceType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.ResourceName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AlterConfigsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AlterConfigsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AlterConfigsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Resources + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterConfigsResponseResource, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + var t ConfigResourceType + { + v := b.Int8() + t = ConfigResourceType(v) + } + v := t + s.ResourceType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ResourceName = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Resources = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAlterConfigsResponse returns a pointer to a default AlterConfigsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAlterConfigsResponse() *AlterConfigsResponse { + var v AlterConfigsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterConfigsResponse. +func (v *AlterConfigsResponse) Default() { +} + +// NewAlterConfigsResponse returns a default AlterConfigsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterConfigsResponse() AlterConfigsResponse { + var v AlterConfigsResponse + v.Default() + return v +} + +type AlterReplicaLogDirsRequestDirTopic struct { + // Topic is a topic to move. + Topic string + + // Partitions contains partitions for the topic to move. + Partitions []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterReplicaLogDirsRequestDirTopic. +func (v *AlterReplicaLogDirsRequestDirTopic) Default() { +} + +// NewAlterReplicaLogDirsRequestDirTopic returns a default AlterReplicaLogDirsRequestDirTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterReplicaLogDirsRequestDirTopic() AlterReplicaLogDirsRequestDirTopic { + var v AlterReplicaLogDirsRequestDirTopic + v.Default() + return v +} + +type AlterReplicaLogDirsRequestDir struct { + // Dir is an absolute path where everything listed below should + // end up. + Dir string + + // Topics contains topics to move to the above log directory. + Topics []AlterReplicaLogDirsRequestDirTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterReplicaLogDirsRequestDir. +func (v *AlterReplicaLogDirsRequestDir) Default() { +} + +// NewAlterReplicaLogDirsRequestDir returns a default AlterReplicaLogDirsRequestDir +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterReplicaLogDirsRequestDir() AlterReplicaLogDirsRequestDir { + var v AlterReplicaLogDirsRequestDir + v.Default() + return v +} + +// AlterReplicaLogDirsRequest requests for log directories to be moved +// within Kafka. +// +// This is primarily useful for moving directories between disks. 
+type AlterReplicaLogDirsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Dirs contains absolute paths of where you want things to end up. + Dirs []AlterReplicaLogDirsRequestDir + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*AlterReplicaLogDirsRequest) Key() int16 { return 34 } +func (*AlterReplicaLogDirsRequest) MaxVersion() int16 { return 2 } +func (v *AlterReplicaLogDirsRequest) SetVersion(version int16) { v.Version = version } +func (v *AlterReplicaLogDirsRequest) GetVersion() int16 { return v.Version } +func (v *AlterReplicaLogDirsRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *AlterReplicaLogDirsRequest) ResponseKind() Response { + r := &AlterReplicaLogDirsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *AlterReplicaLogDirsRequest) RequestWith(ctx context.Context, r Requestor) (*AlterReplicaLogDirsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*AlterReplicaLogDirsResponse) + return resp, err +} + +func (v *AlterReplicaLogDirsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.Dirs + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Dir + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AlterReplicaLogDirsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AlterReplicaLogDirsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AlterReplicaLogDirsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := s.Dirs + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterReplicaLogDirsRequestDir, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Dir = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterReplicaLogDirsRequestDirTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Dirs = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAlterReplicaLogDirsRequest returns a pointer to a default AlterReplicaLogDirsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAlterReplicaLogDirsRequest() *AlterReplicaLogDirsRequest { + var v AlterReplicaLogDirsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterReplicaLogDirsRequest. +func (v *AlterReplicaLogDirsRequest) Default() { +} + +// NewAlterReplicaLogDirsRequest returns a default AlterReplicaLogDirsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterReplicaLogDirsRequest() AlterReplicaLogDirsRequest { + var v AlterReplicaLogDirsRequest + v.Default() + return v +} + +type AlterReplicaLogDirsResponseTopicPartition struct { + // Partition is the partition this array slot corresponds to. + Partition int32 + + // CLUSTER_AUTHORIZATION_FAILED is returned if the client is not + // authorized to alter replica dirs. + // + // LOG_DIR_NOT_FOUND is returned when the requested log directory + // is not in the broker config. + // + // KAFKA_STORAGE_EXCEPTION is returned when destination directory or + // requested replica is offline. + // + // REPLICA_NOT_AVAILABLE is returned if the replica does not exist + // yet. + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterReplicaLogDirsResponseTopicPartition. +func (v *AlterReplicaLogDirsResponseTopicPartition) Default() { +} + +// NewAlterReplicaLogDirsResponseTopicPartition returns a default AlterReplicaLogDirsResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. 
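A small sketch of an AlterReplicaLogDirs request built from the types above; the directory path, topic, and partition list are placeholders:

package main

import "github.com/twmb/franz-go/pkg/kmsg"

func buildMove() []byte {
	t := kmsg.NewAlterReplicaLogDirsRequestDirTopic()
	t.Topic = "orders"              // placeholder topic
	t.Partitions = []int32{0, 1, 2} // partitions to move into the new directory

	dir := kmsg.NewAlterReplicaLogDirsRequestDir()
	dir.Dir = "/var/lib/kafka/data-1" // placeholder absolute log dir on the broker
	dir.Topics = append(dir.Topics, t)

	req := kmsg.NewPtrAlterReplicaLogDirsRequest()
	req.SetVersion(1)
	req.Dirs = append(req.Dirs, dir)

	return req.AppendTo(nil)
}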
+func NewAlterReplicaLogDirsResponseTopicPartition() AlterReplicaLogDirsResponseTopicPartition { + var v AlterReplicaLogDirsResponseTopicPartition + v.Default() + return v +} + +type AlterReplicaLogDirsResponseTopic struct { + // Topic is the topic this array slot corresponds to. + Topic string + + // Partitions contains responses to each partition that was requested + // to move. + Partitions []AlterReplicaLogDirsResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterReplicaLogDirsResponseTopic. +func (v *AlterReplicaLogDirsResponseTopic) Default() { +} + +// NewAlterReplicaLogDirsResponseTopic returns a default AlterReplicaLogDirsResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterReplicaLogDirsResponseTopic() AlterReplicaLogDirsResponseTopic { + var v AlterReplicaLogDirsResponseTopic + v.Default() + return v +} + +// AlterReplicaLogDirsResponse is returned from an AlterReplicaLogDirsRequest. +type AlterReplicaLogDirsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // Topics contains responses to each topic that had partitions requested + // for moving. + Topics []AlterReplicaLogDirsResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v2+ +} + +func (*AlterReplicaLogDirsResponse) Key() int16 { return 34 } +func (*AlterReplicaLogDirsResponse) MaxVersion() int16 { return 2 } +func (v *AlterReplicaLogDirsResponse) SetVersion(version int16) { v.Version = version } +func (v *AlterReplicaLogDirsResponse) GetVersion() int16 { return v.Version } +func (v *AlterReplicaLogDirsResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *AlterReplicaLogDirsResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 1 +} + +func (v *AlterReplicaLogDirsResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *AlterReplicaLogDirsResponse) RequestKind() Request { + return &AlterReplicaLogDirsRequest{Version: v.Version} +} + +func (v *AlterReplicaLogDirsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AlterReplicaLogDirsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AlterReplicaLogDirsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AlterReplicaLogDirsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterReplicaLogDirsResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterReplicaLogDirsResponseTopicPartition, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAlterReplicaLogDirsResponse returns a pointer to a default AlterReplicaLogDirsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAlterReplicaLogDirsResponse() *AlterReplicaLogDirsResponse { + var v AlterReplicaLogDirsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterReplicaLogDirsResponse. +func (v *AlterReplicaLogDirsResponse) Default() { +} + +// NewAlterReplicaLogDirsResponse returns a default AlterReplicaLogDirsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterReplicaLogDirsResponse() AlterReplicaLogDirsResponse { + var v AlterReplicaLogDirsResponse + v.Default() + return v +} + +type DescribeLogDirsRequestTopic struct { + // Topic is a topic to describe the log dir of. + Topic string + + // Partitions contains topic partitions to describe the log dirs of. + Partitions []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeLogDirsRequestTopic. +func (v *DescribeLogDirsRequestTopic) Default() { +} + +// NewDescribeLogDirsRequestTopic returns a default DescribeLogDirsRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeLogDirsRequestTopic() DescribeLogDirsRequestTopic { + var v DescribeLogDirsRequestTopic + v.Default() + return v +} + +// DescribeLogDirsRequest requests directory information for topic partitions. +// This request was added in support of KIP-113. +type DescribeLogDirsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Topics is an array of topics to describe the log dirs of. If this is + // null, the response includes all topics and all of their partitions. + Topics []DescribeLogDirsRequestTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*DescribeLogDirsRequest) Key() int16 { return 35 } +func (*DescribeLogDirsRequest) MaxVersion() int16 { return 4 } +func (v *DescribeLogDirsRequest) SetVersion(version int16) { v.Version = version } +func (v *DescribeLogDirsRequest) GetVersion() int16 { return v.Version } +func (v *DescribeLogDirsRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *DescribeLogDirsRequest) ResponseKind() Response { + r := &DescribeLogDirsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
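As the field documentation above notes, a null Topics array asks the broker to describe every topic and partition. A minimal sketch of building that request body with the generated constructors (the package and helper names below are illustrative):

package kmsgexample // hypothetical example package, not part of the vendored file

import "github.com/twmb/franz-go/pkg/kmsg"

// describeAllLogDirsBody builds a DescribeLogDirsRequest body that asks for
// all topics and partitions. The caller is assumed to prepend the usual
// Kafka request header itself.
func describeAllLogDirsBody(version int16) []byte {
	req := kmsg.NewPtrDescribeLogDirsRequest()
	req.SetVersion(version) // v2+ switches to the flexible (compact) encoding
	req.Topics = nil        // null array: describe everything
	return req.AppendTo(nil)
}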
+func (v *DescribeLogDirsRequest) RequestWith(ctx context.Context, r Requestor) (*DescribeLogDirsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DescribeLogDirsResponse) + return resp, err +} + +func (v *DescribeLogDirsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeLogDirsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeLogDirsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeLogDirsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 0 || l == 0 { + a = []DescribeLogDirsRequestTopic{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeLogDirsRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeLogDirsRequest returns a pointer to a default DescribeLogDirsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeLogDirsRequest() *DescribeLogDirsRequest { + var v DescribeLogDirsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeLogDirsRequest. +func (v *DescribeLogDirsRequest) Default() { +} + +// NewDescribeLogDirsRequest returns a default DescribeLogDirsRequest +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewDescribeLogDirsRequest() DescribeLogDirsRequest { + var v DescribeLogDirsRequest + v.Default() + return v +} + +type DescribeLogDirsResponseDirTopicPartition struct { + // Partition is a partition ID. + Partition int32 + + // Size is the total size of the log sements of this partition, in bytes. + Size int64 + + // OffsetLag is how far behind the log end offset is compared to + // the partition's high watermark (if this is the current log for + // the partition) or compared to the current replica's log end + // offset (if this is the future log for the patition). + // + // The math is, + // + // if IsFuture, localLogEndOffset - futurelogEndOffset. + // + // otherwise, max(localHighWatermark - logEndOffset, 0). + OffsetLag int64 + + // IsFuture is true if this replica was created by an + // AlterReplicaLogDirsRequest and will replace the current log of the + // replica in the future. + IsFuture bool + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeLogDirsResponseDirTopicPartition. +func (v *DescribeLogDirsResponseDirTopicPartition) Default() { +} + +// NewDescribeLogDirsResponseDirTopicPartition returns a default DescribeLogDirsResponseDirTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeLogDirsResponseDirTopicPartition() DescribeLogDirsResponseDirTopicPartition { + var v DescribeLogDirsResponseDirTopicPartition + v.Default() + return v +} + +type DescribeLogDirsResponseDirTopic struct { + // Topic is the name of a Kafka topic. + Topic string + + // Partitions is the set of queried partitions for a topic that are + // within a log directory. + Partitions []DescribeLogDirsResponseDirTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeLogDirsResponseDirTopic. +func (v *DescribeLogDirsResponseDirTopic) Default() { +} + +// NewDescribeLogDirsResponseDirTopic returns a default DescribeLogDirsResponseDirTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeLogDirsResponseDirTopic() DescribeLogDirsResponseDirTopic { + var v DescribeLogDirsResponseDirTopic + v.Default() + return v +} + +type DescribeLogDirsResponseDir struct { + // ErrorCode is the error code returned for describing log dirs. + // + // KAFKA_STORAGE_ERROR is returned if the log directory is offline. + ErrorCode int16 + + // Dir is the absolute path of a log directory. + Dir string + + // Topics is an array of topics within a log directory. + Topics []DescribeLogDirsResponseDirTopic + + // TotalBytes is the total size in bytes of the volume the log directory is + // in. + // + // This field has a default of -1. + TotalBytes int64 // v4+ + + // UsableBytes is the usable size in bytes of the volume the log directory + // is in. + // + // This field has a default of -1. + UsableBytes int64 // v4+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeLogDirsResponseDir. 
+func (v *DescribeLogDirsResponseDir) Default() { + v.TotalBytes = -1 + v.UsableBytes = -1 +} + +// NewDescribeLogDirsResponseDir returns a default DescribeLogDirsResponseDir +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeLogDirsResponseDir() DescribeLogDirsResponseDir { + var v DescribeLogDirsResponseDir + v.Default() + return v +} + +// DescribeLogDirsResponse is returned from a DescribeLogDirsRequest. +type DescribeLogDirsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // The error code, or 0 if there was no error. + ErrorCode int16 // v3+ + + // Dirs pairs log directories with the topics and partitions that are + // stored in those directores. + Dirs []DescribeLogDirsResponseDir + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*DescribeLogDirsResponse) Key() int16 { return 35 } +func (*DescribeLogDirsResponse) MaxVersion() int16 { return 4 } +func (v *DescribeLogDirsResponse) SetVersion(version int16) { v.Version = version } +func (v *DescribeLogDirsResponse) GetVersion() int16 { return v.Version } +func (v *DescribeLogDirsResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *DescribeLogDirsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 1 } +func (v *DescribeLogDirsResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *DescribeLogDirsResponse) RequestKind() Request { + return &DescribeLogDirsRequest{Version: v.Version} +} + +func (v *DescribeLogDirsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + if version >= 3 { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Dirs + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Dir + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Size + dst = kbin.AppendInt64(dst, v) + } + { + v := v.OffsetLag + dst = kbin.AppendInt64(dst, v) + } + { + v := v.IsFuture + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + 
} + if version >= 4 { + v := v.TotalBytes + dst = kbin.AppendInt64(dst, v) + } + if version >= 4 { + v := v.UsableBytes + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeLogDirsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeLogDirsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeLogDirsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + if version >= 3 { + v := b.Int16() + s.ErrorCode = v + } + { + v := s.Dirs + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeLogDirsResponseDir, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Dir = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeLogDirsResponseDirTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeLogDirsResponseDirTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int64() + s.Size = v + } + { + v := b.Int64() + s.OffsetLag = v + } + { + v := b.Bool() + s.IsFuture = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if version >= 4 { + v := b.Int64() + s.TotalBytes = v + } + if version >= 4 { + v := b.Int64() + s.UsableBytes = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Dirs = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeLogDirsResponse returns a pointer to a default DescribeLogDirsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeLogDirsResponse() *DescribeLogDirsResponse { + var v DescribeLogDirsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeLogDirsResponse. 
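A short sketch of consuming the response type defined above, totalling log segment bytes per directory; it assumes the response header has already been read and that the version matches the request that was sent (package and helper names are illustrative):

package kmsgexample // hypothetical example package, not part of the vendored file

import "github.com/twmb/franz-go/pkg/kmsg"

// logDirSizes decodes a DescribeLogDirsResponse body and sums the Size of
// every reported partition, keyed by log directory path.
func logDirSizes(version int16, body []byte) (map[string]int64, error) {
	resp := kmsg.NewPtrDescribeLogDirsResponse()
	resp.SetVersion(version)
	if err := resp.ReadFrom(body); err != nil {
		return nil, err
	}
	sizes := make(map[string]int64)
	for _, dir := range resp.Dirs {
		for _, t := range dir.Topics {
			for _, p := range t.Partitions {
				sizes[dir.Dir] += p.Size
			}
		}
	}
	return sizes, nil
}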
+func (v *DescribeLogDirsResponse) Default() { +} + +// NewDescribeLogDirsResponse returns a default DescribeLogDirsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeLogDirsResponse() DescribeLogDirsResponse { + var v DescribeLogDirsResponse + v.Default() + return v +} + +// SASLAuthenticate continues a sasl authentication flow. Prior to Kafka 1.0.0, +// authenticating with sasl involved sending raw blobs of data back and forth. +// After, those blobs are wrapped in a SASLAuthenticateRequest The benefit of +// this wrapping is that Kafka can indicate errors in the response, rather than +// just closing the connection. Additionally, the response allows for further +// extension fields. +type SASLAuthenticateRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // SASLAuthBytes contains bytes for a SASL client request. + SASLAuthBytes []byte + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*SASLAuthenticateRequest) Key() int16 { return 36 } +func (*SASLAuthenticateRequest) MaxVersion() int16 { return 2 } +func (v *SASLAuthenticateRequest) SetVersion(version int16) { v.Version = version } +func (v *SASLAuthenticateRequest) GetVersion() int16 { return v.Version } +func (v *SASLAuthenticateRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *SASLAuthenticateRequest) ResponseKind() Response { + r := &SASLAuthenticateResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *SASLAuthenticateRequest) RequestWith(ctx context.Context, r Requestor) (*SASLAuthenticateResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*SASLAuthenticateResponse) + return resp, err +} + +func (v *SASLAuthenticateRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.SASLAuthBytes + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *SASLAuthenticateRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *SASLAuthenticateRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *SASLAuthenticateRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.SASLAuthBytes = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrSASLAuthenticateRequest returns a pointer to a default SASLAuthenticateRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrSASLAuthenticateRequest() *SASLAuthenticateRequest { + var v SASLAuthenticateRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to SASLAuthenticateRequest. 
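Since SASLAuthBytes is just the opaque SASL payload, wrapping a mechanism's bytes is straightforward. A minimal sketch for SASL/PLAIN, whose initial client response is authzid NUL authcid NUL password per RFC 4616; the package and helper names are illustrative and the version is whatever was negotiated for api key 36:

package kmsgexample // hypothetical example package, not part of the vendored file

import "github.com/twmb/franz-go/pkg/kmsg"

// saslPlainAuthenticateBody wraps a SASL/PLAIN initial client response in a
// SASLAuthenticateRequest body (empty authzid).
func saslPlainAuthenticateBody(version int16, user, pass string) []byte {
	auth := []byte("\x00" + user + "\x00" + pass)
	req := kmsg.NewPtrSASLAuthenticateRequest()
	req.SetVersion(version)
	req.SASLAuthBytes = auth
	return req.AppendTo(nil)
}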
+func (v *SASLAuthenticateRequest) Default() { +} + +// NewSASLAuthenticateRequest returns a default SASLAuthenticateRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewSASLAuthenticateRequest() SASLAuthenticateRequest { + var v SASLAuthenticateRequest + v.Default() + return v +} + +// SASLAuthenticateResponse is returned for a SASLAuthenticateRequest. +type SASLAuthenticateResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ErrorCode is a potential error. + ErrorCode int16 + + // ErrorMessage can contain a message for an error. + ErrorMessage *string + + // SASLAuthBytes is the server challenge continuing SASL flow. + SASLAuthBytes []byte + + // SessionLifetimeMillis, added in Kafka 2.2.0, is how long the SASL + // authentication is valid for. This timeout is only enforced if the request + // was v1. After this timeout, Kafka expects the next bytes on the wire to + // begin reauthentication. Otherwise, Kafka closes the connection. + SessionLifetimeMillis int64 // v1+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*SASLAuthenticateResponse) Key() int16 { return 36 } +func (*SASLAuthenticateResponse) MaxVersion() int16 { return 2 } +func (v *SASLAuthenticateResponse) SetVersion(version int16) { v.Version = version } +func (v *SASLAuthenticateResponse) GetVersion() int16 { return v.Version } +func (v *SASLAuthenticateResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *SASLAuthenticateResponse) RequestKind() Request { + return &SASLAuthenticateRequest{Version: v.Version} +} + +func (v *SASLAuthenticateResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.SASLAuthBytes + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + if version >= 1 { + v := v.SessionLifetimeMillis + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *SASLAuthenticateResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *SASLAuthenticateResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *SASLAuthenticateResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.SASLAuthBytes = v + } + if version >= 1 { + v := b.Int64() + s.SessionLifetimeMillis = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrSASLAuthenticateResponse returns a pointer to a default SASLAuthenticateResponse +// This is a shortcut for creating a new(struct) and 
calling Default yourself. +func NewPtrSASLAuthenticateResponse() *SASLAuthenticateResponse { + var v SASLAuthenticateResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to SASLAuthenticateResponse. +func (v *SASLAuthenticateResponse) Default() { +} + +// NewSASLAuthenticateResponse returns a default SASLAuthenticateResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewSASLAuthenticateResponse() SASLAuthenticateResponse { + var v SASLAuthenticateResponse + v.Default() + return v +} + +type CreatePartitionsRequestTopicAssignment struct { + // Replicas are replicas to assign a new partition to. + Replicas []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreatePartitionsRequestTopicAssignment. +func (v *CreatePartitionsRequestTopicAssignment) Default() { +} + +// NewCreatePartitionsRequestTopicAssignment returns a default CreatePartitionsRequestTopicAssignment +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreatePartitionsRequestTopicAssignment() CreatePartitionsRequestTopicAssignment { + var v CreatePartitionsRequestTopicAssignment + v.Default() + return v +} + +type CreatePartitionsRequestTopic struct { + // Topic is a topic for which to create additional partitions for. + Topic string + + // Count is the final count of partitions this topic must have after this + // request. This must be greater than the current number of partitions. + Count int32 + + // Assignment is a two-level array, the first corresponding to new + // partitions, the second contining broker IDs for where new partition + // replicas should live. + // + // The second level, the replicas, cannot have duplicate broker IDs (i.e. + // you cannot replicate a single partition twice on the same broker). + // Additionally, the number of replicas must match the current number of + // replicas per partition on the topic. + // + // The first level's length must be equal to the delta of Count and the + // current number of partitions. + Assignment []CreatePartitionsRequestTopicAssignment + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreatePartitionsRequestTopic. +func (v *CreatePartitionsRequestTopic) Default() { +} + +// NewCreatePartitionsRequestTopic returns a default CreatePartitionsRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreatePartitionsRequestTopic() CreatePartitionsRequestTopic { + var v CreatePartitionsRequestTopic + v.Default() + return v +} + +// CreatePartitionsRequest creates additional partitions for topics. +type CreatePartitionsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Topics contains topics to create partitions for. + Topics []CreatePartitionsRequestTopic + + // TimeoutMillis is how long Kafka can wait before responding to this request. + // This field has no effect on Kafka's processing of the request; the request + // will continue to be processed if the timeout is reached. If the timeout is + // reached, Kafka will reply with a REQUEST_TIMED_OUT error. 
+ // + // This field has a default of 15000. + TimeoutMillis int32 + + // ValidateOnly is makes this request a dry-run; everything is validated but + // no partitions are actually created. + ValidateOnly bool + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*CreatePartitionsRequest) Key() int16 { return 37 } +func (*CreatePartitionsRequest) MaxVersion() int16 { return 3 } +func (v *CreatePartitionsRequest) SetVersion(version int16) { v.Version = version } +func (v *CreatePartitionsRequest) GetVersion() int16 { return v.Version } +func (v *CreatePartitionsRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *CreatePartitionsRequest) Timeout() int32 { return v.TimeoutMillis } +func (v *CreatePartitionsRequest) SetTimeout(timeoutMillis int32) { v.TimeoutMillis = timeoutMillis } +func (v *CreatePartitionsRequest) IsAdminRequest() {} +func (v *CreatePartitionsRequest) ResponseKind() Response { + r := &CreatePartitionsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *CreatePartitionsRequest) RequestWith(ctx context.Context, r Requestor) (*CreatePartitionsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*CreatePartitionsResponse) + return resp, err +} + +func (v *CreatePartitionsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Count + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Assignment + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + for i := range v { + v := &v[i] + { + v := v.Replicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.TimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ValidateOnly + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *CreatePartitionsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *CreatePartitionsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *CreatePartitionsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { 
+ return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]CreatePartitionsRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := b.Int32() + s.Count = v + } + { + v := s.Assignment + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 0 || l == 0 { + a = []CreatePartitionsRequestTopicAssignment{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]CreatePartitionsRequestTopicAssignment, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := s.Replicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Replicas = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Assignment = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + { + v := b.Int32() + s.TimeoutMillis = v + } + { + v := b.Bool() + s.ValidateOnly = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrCreatePartitionsRequest returns a pointer to a default CreatePartitionsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrCreatePartitionsRequest() *CreatePartitionsRequest { + var v CreatePartitionsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreatePartitionsRequest. +func (v *CreatePartitionsRequest) Default() { + v.TimeoutMillis = 15000 +} + +// NewCreatePartitionsRequest returns a default CreatePartitionsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreatePartitionsRequest() CreatePartitionsRequest { + var v CreatePartitionsRequest + v.Default() + return v +} + +type CreatePartitionsResponseTopic struct { + // Topic is the topic that partitions were requested to be made for. + Topic string + + // ErrorCode is the error code returned for each topic in the request. + // + // NOT_CONTROLLER is returned if the request was not issued to a Kafka + // controller. + // + // TOPIC_AUTHORIZATION_FAILED is returned if the client is not authorized + // to create partitions for a topic. + // + // INVALID_REQUEST is returned for duplicate topics in the request. + // + // INVALID_TOPIC_EXCEPTION is returned if the topic is queued for deletion. + // + // REASSIGNMENT_IN_PROGRESS is returned if the request was issued while + // partitions were being reassigned. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the broker does not know of + // the topic for which to create partitions. + // + // INVALID_PARTITIONS is returned if the request would drop the total + // count of partitions down, or if the request would not add any more + // partitions, or if the request uses unknown brokers, or if the request + // assigns a different number of brokers than the increase in the + // partition count. 
+ ErrorCode int16 + + // ErrorMessage is an informative message if the topic creation failed. + ErrorMessage *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreatePartitionsResponseTopic. +func (v *CreatePartitionsResponseTopic) Default() { +} + +// NewCreatePartitionsResponseTopic returns a default CreatePartitionsResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreatePartitionsResponseTopic() CreatePartitionsResponseTopic { + var v CreatePartitionsResponseTopic + v.Default() + return v +} + +// CreatePartitionsResponse is returned from a CreatePartitionsRequest. +type CreatePartitionsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // Topics is a response to each topic in the creation request. + Topics []CreatePartitionsResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*CreatePartitionsResponse) Key() int16 { return 37 } +func (*CreatePartitionsResponse) MaxVersion() int16 { return 3 } +func (v *CreatePartitionsResponse) SetVersion(version int16) { v.Version = version } +func (v *CreatePartitionsResponse) GetVersion() int16 { return v.Version } +func (v *CreatePartitionsResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *CreatePartitionsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 1 } +func (v *CreatePartitionsResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *CreatePartitionsResponse) RequestKind() Request { + return &CreatePartitionsRequest{Version: v.Version} +} + +func (v *CreatePartitionsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *CreatePartitionsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *CreatePartitionsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *CreatePartitionsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible 
:= version >= 2 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]CreatePartitionsResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrCreatePartitionsResponse returns a pointer to a default CreatePartitionsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrCreatePartitionsResponse() *CreatePartitionsResponse { + var v CreatePartitionsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreatePartitionsResponse. +func (v *CreatePartitionsResponse) Default() { +} + +// NewCreatePartitionsResponse returns a default CreatePartitionsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreatePartitionsResponse() CreatePartitionsResponse { + var v CreatePartitionsResponse + v.Default() + return v +} + +type CreateDelegationTokenRequestRenewer struct { + // PrincipalType is the "type" this principal is. This must be "User". + PrincipalType string + + // PrincipalName is the user name allowed to renew the returned token. + PrincipalName string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreateDelegationTokenRequestRenewer. +func (v *CreateDelegationTokenRequestRenewer) Default() { +} + +// NewCreateDelegationTokenRequestRenewer returns a default CreateDelegationTokenRequestRenewer +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreateDelegationTokenRequestRenewer() CreateDelegationTokenRequestRenewer { + var v CreateDelegationTokenRequestRenewer + v.Default() + return v +} + +// CreateDelegationTokenRequest issues a request to create a delegation token. +// +// Creating delegation tokens allows for an (ideally) quicker and easier method +// of enabling authorization for a wide array of clients. Rather than having to +// manage many passwords external to Kafka, you only need to manage a few +// accounts and use those to create delegation tokens per client. +// +// Note that delegation tokens inherit the same ACLs as the user creating the +// token. Thus, if you want to properly scope ACLs, you should not create +// delegation tokens with admin accounts. +// +// Delegation tokens live inside of Kafka and use SASL SCRAM-SHA-256 for +// authorization. 
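Building on the doc comment above, a minimal sketch of creating a delegation token request that names one renewer and caps the lifetime at one hour; PrincipalType must be "User" per the field documentation, and -1 would instead defer to the broker's delegation.token.max.lifetime.ms. Package and helper names are illustrative:

package kmsgexample // hypothetical example package, not part of the vendored file

import "github.com/twmb/franz-go/pkg/kmsg"

// createTokenBody builds a CreateDelegationTokenRequest body with a single
// renewer and a one hour maximum lifetime.
func createTokenBody(version int16, renewer string) []byte {
	r := kmsg.NewCreateDelegationTokenRequestRenewer()
	r.PrincipalType = "User" // the only accepted principal type
	r.PrincipalName = renewer

	req := kmsg.NewPtrCreateDelegationTokenRequest()
	req.SetVersion(version)
	req.Renewers = []kmsg.CreateDelegationTokenRequestRenewer{r}
	req.MaxLifetimeMillis = 60 * 60 * 1000
	return req.AppendTo(nil)
}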
+type CreateDelegationTokenRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The principal type of the owner of the token. If null, this defaults + // to the token request principal. + OwnerPrincipalType *string // v3+ + + // Principal name of the owner of the token. If null, this defaults to + // the token request principal. + OwnerPrincipalName *string // v3+ + + // Renewers is a list of who can renew this delegation token. If empty, the + // default is the principal (user) who created the token. + Renewers []CreateDelegationTokenRequestRenewer + + // MaxLifetimeMillis is how long this delegation token will be valid for. + // If -1, the default will be the server's delegation.token.max.lifetime.ms. + MaxLifetimeMillis int64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*CreateDelegationTokenRequest) Key() int16 { return 38 } +func (*CreateDelegationTokenRequest) MaxVersion() int16 { return 3 } +func (v *CreateDelegationTokenRequest) SetVersion(version int16) { v.Version = version } +func (v *CreateDelegationTokenRequest) GetVersion() int16 { return v.Version } +func (v *CreateDelegationTokenRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *CreateDelegationTokenRequest) ResponseKind() Response { + r := &CreateDelegationTokenResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *CreateDelegationTokenRequest) RequestWith(ctx context.Context, r Requestor) (*CreateDelegationTokenResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*CreateDelegationTokenResponse) + return resp, err +} + +func (v *CreateDelegationTokenRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + if version >= 3 { + v := v.OwnerPrincipalType + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 3 { + v := v.OwnerPrincipalName + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Renewers + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.PrincipalType + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.PrincipalName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.MaxLifetimeMillis + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *CreateDelegationTokenRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *CreateDelegationTokenRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *CreateDelegationTokenRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := 
kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + if version >= 3 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.OwnerPrincipalType = v + } + if version >= 3 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.OwnerPrincipalName = v + } + { + v := s.Renewers + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]CreateDelegationTokenRequestRenewer, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.PrincipalType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.PrincipalName = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Renewers = v + } + { + v := b.Int64() + s.MaxLifetimeMillis = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrCreateDelegationTokenRequest returns a pointer to a default CreateDelegationTokenRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrCreateDelegationTokenRequest() *CreateDelegationTokenRequest { + var v CreateDelegationTokenRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreateDelegationTokenRequest. +func (v *CreateDelegationTokenRequest) Default() { +} + +// NewCreateDelegationTokenRequest returns a default CreateDelegationTokenRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreateDelegationTokenRequest() CreateDelegationTokenRequest { + var v CreateDelegationTokenRequest + v.Default() + return v +} + +// CreateDelegationTokenResponse is a response to a CreateDelegationTokenRequest. +type CreateDelegationTokenResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ErrorCode is any error that caused the request to fail. + ErrorCode int16 + + // PrincipalType is the type of principal that granted this delegation token. + // This will always be "User" with the simple authorizer. + PrincipalType string + + // PrincipalName is the name of the principal that granted this delegation + // token. + PrincipalName string + + // The principal type of the requester of the token. + TokenRequesterPrincipalType string // v3+ + + // The principal name of the requester token. + TokenRequesterPrincipalName string // v3+ + + // IssueTimestamp is the millisecond timestamp this delegation token was + // issued. + IssueTimestamp int64 + + // ExpiryTimestamp is the millisecond timestamp this token will expire. 
The + // token can be renewed up to MaxTimestamp, past which point, it will be + // invalid. The Kafka default is 24h. + ExpiryTimestamp int64 + + // MaxTimestamp is the millisecond timestamp past which this token cannot + // be renewed. + MaxTimestamp int64 + + // TokenID is the ID of this token; this will be used as the username for + // scram authentication. + TokenID string + + // HMAC is the password of this token; this will be used as the password for + // scram authentication. + HMAC []byte + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*CreateDelegationTokenResponse) Key() int16 { return 38 } +func (*CreateDelegationTokenResponse) MaxVersion() int16 { return 3 } +func (v *CreateDelegationTokenResponse) SetVersion(version int16) { v.Version = version } +func (v *CreateDelegationTokenResponse) GetVersion() int16 { return v.Version } +func (v *CreateDelegationTokenResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *CreateDelegationTokenResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 1 +} + +func (v *CreateDelegationTokenResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *CreateDelegationTokenResponse) RequestKind() Request { + return &CreateDelegationTokenRequest{Version: v.Version} +} + +func (v *CreateDelegationTokenResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.PrincipalType + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.PrincipalName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 3 { + v := v.TokenRequesterPrincipalType + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 3 { + v := v.TokenRequesterPrincipalName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.IssueTimestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ExpiryTimestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.MaxTimestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.TokenID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.HMAC + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *CreateDelegationTokenResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *CreateDelegationTokenResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *CreateDelegationTokenResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = 
version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.PrincipalType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.PrincipalName = v + } + if version >= 3 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.TokenRequesterPrincipalType = v + } + if version >= 3 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.TokenRequesterPrincipalName = v + } + { + v := b.Int64() + s.IssueTimestamp = v + } + { + v := b.Int64() + s.ExpiryTimestamp = v + } + { + v := b.Int64() + s.MaxTimestamp = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.TokenID = v + } + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.HMAC = v + } + { + v := b.Int32() + s.ThrottleMillis = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrCreateDelegationTokenResponse returns a pointer to a default CreateDelegationTokenResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrCreateDelegationTokenResponse() *CreateDelegationTokenResponse { + var v CreateDelegationTokenResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreateDelegationTokenResponse. +func (v *CreateDelegationTokenResponse) Default() { +} + +// NewCreateDelegationTokenResponse returns a default CreateDelegationTokenResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreateDelegationTokenResponse() CreateDelegationTokenResponse { + var v CreateDelegationTokenResponse + v.Default() + return v +} + +// RenewDelegationTokenRequest is a request to renew a delegation token that +// has not yet hit its max timestamp. Note that a client using a token cannot +// renew its own token. +type RenewDelegationTokenRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // HMAC is the HMAC of the token to be renewed. + HMAC []byte + + // RenewTimeMillis is how long to renew the token for. If -1, Kafka uses its + // delegation.token.max.lifetime.ms. + RenewTimeMillis int64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v2+ +} + +func (*RenewDelegationTokenRequest) Key() int16 { return 39 } +func (*RenewDelegationTokenRequest) MaxVersion() int16 { return 2 } +func (v *RenewDelegationTokenRequest) SetVersion(version int16) { v.Version = version } +func (v *RenewDelegationTokenRequest) GetVersion() int16 { return v.Version } +func (v *RenewDelegationTokenRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *RenewDelegationTokenRequest) ResponseKind() Response { + r := &RenewDelegationTokenResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *RenewDelegationTokenRequest) RequestWith(ctx context.Context, r Requestor) (*RenewDelegationTokenResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*RenewDelegationTokenResponse) + return resp, err +} + +func (v *RenewDelegationTokenRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.HMAC + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + { + v := v.RenewTimeMillis + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *RenewDelegationTokenRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *RenewDelegationTokenRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *RenewDelegationTokenRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.HMAC = v + } + { + v := b.Int64() + s.RenewTimeMillis = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrRenewDelegationTokenRequest returns a pointer to a default RenewDelegationTokenRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrRenewDelegationTokenRequest() *RenewDelegationTokenRequest { + var v RenewDelegationTokenRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to RenewDelegationTokenRequest. +func (v *RenewDelegationTokenRequest) Default() { +} + +// NewRenewDelegationTokenRequest returns a default RenewDelegationTokenRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewRenewDelegationTokenRequest() RenewDelegationTokenRequest { + var v RenewDelegationTokenRequest + v.Default() + return v +} + +// RenewDelegationTokenResponse is a response to a RenewDelegationTokenRequest. +type RenewDelegationTokenResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ErrorCode is any error that caused the request to fail. + ErrorCode int16 + + // ExpiryTimestamp is the millisecond timestamp this token will expire. The + // token can be renewed up to MaxTimestamp, past which point, it will be + // invalid. The Kafka default is 24h. 
+ ExpiryTimestamp int64 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*RenewDelegationTokenResponse) Key() int16 { return 39 } +func (*RenewDelegationTokenResponse) MaxVersion() int16 { return 2 } +func (v *RenewDelegationTokenResponse) SetVersion(version int16) { v.Version = version } +func (v *RenewDelegationTokenResponse) GetVersion() int16 { return v.Version } +func (v *RenewDelegationTokenResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *RenewDelegationTokenResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 1 +} + +func (v *RenewDelegationTokenResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *RenewDelegationTokenResponse) RequestKind() Request { + return &RenewDelegationTokenRequest{Version: v.Version} +} + +func (v *RenewDelegationTokenResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ExpiryTimestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *RenewDelegationTokenResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *RenewDelegationTokenResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *RenewDelegationTokenResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int64() + s.ExpiryTimestamp = v + } + { + v := b.Int32() + s.ThrottleMillis = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrRenewDelegationTokenResponse returns a pointer to a default RenewDelegationTokenResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrRenewDelegationTokenResponse() *RenewDelegationTokenResponse { + var v RenewDelegationTokenResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to RenewDelegationTokenResponse. +func (v *RenewDelegationTokenResponse) Default() { +} + +// NewRenewDelegationTokenResponse returns a default RenewDelegationTokenResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewRenewDelegationTokenResponse() RenewDelegationTokenResponse { + var v RenewDelegationTokenResponse + v.Default() + return v +} + +// ExpireDelegationTokenRequest is a request to change the expiry timestamp +// of a delegation token. Note that a client using a token cannot expire its +// own token. +type ExpireDelegationTokenRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // HMAC is the HMAC of the token to change the expiry timestamp of. 
+ HMAC []byte + + // ExpiryPeriodMillis changes the delegation token's expiry timestamp to + // now + expiry time millis. This can be used to force tokens to expire + // quickly, or to allow tokens a grace period before expiry. You cannot + // add enough expiry that exceeds the original max timestamp. + ExpiryPeriodMillis int64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*ExpireDelegationTokenRequest) Key() int16 { return 40 } +func (*ExpireDelegationTokenRequest) MaxVersion() int16 { return 2 } +func (v *ExpireDelegationTokenRequest) SetVersion(version int16) { v.Version = version } +func (v *ExpireDelegationTokenRequest) GetVersion() int16 { return v.Version } +func (v *ExpireDelegationTokenRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *ExpireDelegationTokenRequest) ResponseKind() Response { + r := &ExpireDelegationTokenResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *ExpireDelegationTokenRequest) RequestWith(ctx context.Context, r Requestor) (*ExpireDelegationTokenResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*ExpireDelegationTokenResponse) + return resp, err +} + +func (v *ExpireDelegationTokenRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.HMAC + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + { + v := v.ExpiryPeriodMillis + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ExpireDelegationTokenRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ExpireDelegationTokenRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ExpireDelegationTokenRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.HMAC = v + } + { + v := b.Int64() + s.ExpiryPeriodMillis = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrExpireDelegationTokenRequest returns a pointer to a default ExpireDelegationTokenRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrExpireDelegationTokenRequest() *ExpireDelegationTokenRequest { + var v ExpireDelegationTokenRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ExpireDelegationTokenRequest. +func (v *ExpireDelegationTokenRequest) Default() { +} + +// NewExpireDelegationTokenRequest returns a default ExpireDelegationTokenRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewExpireDelegationTokenRequest() ExpireDelegationTokenRequest { + var v ExpireDelegationTokenRequest + v.Default() + return v +} + +// ExpireDelegationTokenResponse is a response to an ExpireDelegationTokenRequest. 
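A short usage sketch for ExpireDelegationTokenRequest, assuming application code importing github.com/twmb/franz-go/pkg/kmsg and any kmsg.Requestor (a franz-go client, for instance); setting ExpiryPeriodMillis to 0 forces the token to expire now, per the field comment above.

package main

import (
	"context"
	"fmt"

	"github.com/twmb/franz-go/pkg/kmsg"
)

// expireTokenNow asks the broker to expire a delegation token immediately by
// setting its expiry to now + 0 ms.
func expireTokenNow(ctx context.Context, r kmsg.Requestor, hmac []byte) error {
	req := kmsg.NewPtrExpireDelegationTokenRequest()
	req.HMAC = hmac
	req.ExpiryPeriodMillis = 0
	resp, err := req.RequestWith(ctx, r)
	if err != nil {
		return err
	}
	if resp.ErrorCode != 0 {
		return fmt.Errorf("expire failed with error code %d", resp.ErrorCode)
	}
	return nil
}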
+type ExpireDelegationTokenResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ErrorCode is any error that caused the request to fail. + ErrorCode int16 + + // ExpiryTimestamp is the new timestamp at which the delegation token will + // expire. + ExpiryTimestamp int64 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*ExpireDelegationTokenResponse) Key() int16 { return 40 } +func (*ExpireDelegationTokenResponse) MaxVersion() int16 { return 2 } +func (v *ExpireDelegationTokenResponse) SetVersion(version int16) { v.Version = version } +func (v *ExpireDelegationTokenResponse) GetVersion() int16 { return v.Version } +func (v *ExpireDelegationTokenResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *ExpireDelegationTokenResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 1 +} + +func (v *ExpireDelegationTokenResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *ExpireDelegationTokenResponse) RequestKind() Request { + return &ExpireDelegationTokenRequest{Version: v.Version} +} + +func (v *ExpireDelegationTokenResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ExpiryTimestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ExpireDelegationTokenResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ExpireDelegationTokenResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ExpireDelegationTokenResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int64() + s.ExpiryTimestamp = v + } + { + v := b.Int32() + s.ThrottleMillis = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrExpireDelegationTokenResponse returns a pointer to a default ExpireDelegationTokenResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrExpireDelegationTokenResponse() *ExpireDelegationTokenResponse { + var v ExpireDelegationTokenResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ExpireDelegationTokenResponse. +func (v *ExpireDelegationTokenResponse) Default() { +} + +// NewExpireDelegationTokenResponse returns a default ExpireDelegationTokenResponse +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewExpireDelegationTokenResponse() ExpireDelegationTokenResponse { + var v ExpireDelegationTokenResponse + v.Default() + return v +} + +type DescribeDelegationTokenRequestOwner struct { + // PrincipalType is a type to match to describe delegation tokens created + // with this principal. This would be "User" with the simple authorizer. + PrincipalType string + + // PrincipalName is the name to match to describe delegation tokens created + // with this principal. + PrincipalName string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeDelegationTokenRequestOwner. +func (v *DescribeDelegationTokenRequestOwner) Default() { +} + +// NewDescribeDelegationTokenRequestOwner returns a default DescribeDelegationTokenRequestOwner +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeDelegationTokenRequestOwner() DescribeDelegationTokenRequestOwner { + var v DescribeDelegationTokenRequestOwner + v.Default() + return v +} + +// DescribeDelegationTokenRequest is a request to describe delegation tokens. +type DescribeDelegationTokenRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Owners contains owners to describe delegation tokens for, or null for all. + // If non-null, only tokens created from a matching principal type, name + // combination are printed. + Owners []DescribeDelegationTokenRequestOwner + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*DescribeDelegationTokenRequest) Key() int16 { return 41 } +func (*DescribeDelegationTokenRequest) MaxVersion() int16 { return 3 } +func (v *DescribeDelegationTokenRequest) SetVersion(version int16) { v.Version = version } +func (v *DescribeDelegationTokenRequest) GetVersion() int16 { return v.Version } +func (v *DescribeDelegationTokenRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *DescribeDelegationTokenRequest) ResponseKind() Response { + r := &DescribeDelegationTokenResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
+func (v *DescribeDelegationTokenRequest) RequestWith(ctx context.Context, r Requestor) (*DescribeDelegationTokenResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DescribeDelegationTokenResponse) + return resp, err +} + +func (v *DescribeDelegationTokenRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.Owners + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + for i := range v { + v := &v[i] + { + v := v.PrincipalType + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.PrincipalName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeDelegationTokenRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeDelegationTokenRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeDelegationTokenRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := s.Owners + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 0 || l == 0 { + a = []DescribeDelegationTokenRequestOwner{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeDelegationTokenRequestOwner, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.PrincipalType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.PrincipalName = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Owners = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeDelegationTokenRequest returns a pointer to a default DescribeDelegationTokenRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeDelegationTokenRequest() *DescribeDelegationTokenRequest { + var v DescribeDelegationTokenRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeDelegationTokenRequest. +func (v *DescribeDelegationTokenRequest) Default() { +} + +// NewDescribeDelegationTokenRequest returns a default DescribeDelegationTokenRequest +// This is a shortcut for creating a struct and calling Default yourself. 
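A small round-trip sketch showing the encode/decode symmetry of these generated types: AppendTo serializes only the message fields (no length prefix or request header), and ReadFrom on a struct carrying the same version reverses it. The owner values here are purely illustrative.

package main

import (
	"fmt"

	"github.com/twmb/franz-go/pkg/kmsg"
)

func roundTripDescribeRequest() error {
	req := kmsg.NewPtrDescribeDelegationTokenRequest()
	req.SetVersion(2) // v2+ selects the flexible (compact) encoding
	owner := kmsg.NewDescribeDelegationTokenRequestOwner()
	owner.PrincipalType = "User" // simple-authorizer principal type
	owner.PrincipalName = "alice"
	req.Owners = append(req.Owners, owner)

	body := req.AppendTo(nil)

	decoded := kmsg.NewPtrDescribeDelegationTokenRequest()
	decoded.SetVersion(2) // must match the version used to encode
	if err := decoded.ReadFrom(body); err != nil {
		return err
	}
	fmt.Println(len(decoded.Owners)) // 1
	return nil
}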
+func NewDescribeDelegationTokenRequest() DescribeDelegationTokenRequest { + var v DescribeDelegationTokenRequest + v.Default() + return v +} + +type DescribeDelegationTokenResponseTokenDetailRenewer struct { + PrincipalType string + + PrincipalName string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeDelegationTokenResponseTokenDetailRenewer. +func (v *DescribeDelegationTokenResponseTokenDetailRenewer) Default() { +} + +// NewDescribeDelegationTokenResponseTokenDetailRenewer returns a default DescribeDelegationTokenResponseTokenDetailRenewer +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeDelegationTokenResponseTokenDetailRenewer() DescribeDelegationTokenResponseTokenDetailRenewer { + var v DescribeDelegationTokenResponseTokenDetailRenewer + v.Default() + return v +} + +type DescribeDelegationTokenResponseTokenDetail struct { + // PrincipalType is the principal type of who created this token. + PrincipalType string + + // PrincipalName is the principal name of who created this token. + PrincipalName string + + // The principal type of the requester of the token. + TokenRequesterPrincipalType string // v3+ + + // The principal name of the requester token. + TokenRequesterPrincipalName string // v3+ + + // IssueTimestamp is the millisecond timestamp of when this token was issued. + IssueTimestamp int64 + + // ExpiryTimestamp is the millisecond timestamp of when this token will expire. + ExpiryTimestamp int64 + + // MaxTimestamp is the millisecond timestamp past which whis token cannot + // be renewed. + MaxTimestamp int64 + + // TokenID is the ID (scram username) of this token. + TokenID string + + // HMAC is the password of this token. + HMAC []byte + + // Renewers is a list of users that can renew this token. + Renewers []DescribeDelegationTokenResponseTokenDetailRenewer + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeDelegationTokenResponseTokenDetail. +func (v *DescribeDelegationTokenResponseTokenDetail) Default() { +} + +// NewDescribeDelegationTokenResponseTokenDetail returns a default DescribeDelegationTokenResponseTokenDetail +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeDelegationTokenResponseTokenDetail() DescribeDelegationTokenResponseTokenDetail { + var v DescribeDelegationTokenResponseTokenDetail + v.Default() + return v +} + +// DescribeDelegationTokenResponsee is a response to a DescribeDelegationTokenRequest. +type DescribeDelegationTokenResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ErrorCode is any error that caused the request to fail. + ErrorCode int16 + + // TokenDetails shows information about each token created from any principal + // in the request. + TokenDetails []DescribeDelegationTokenResponseTokenDetail + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. 
+ ThrottleMillis int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*DescribeDelegationTokenResponse) Key() int16 { return 41 } +func (*DescribeDelegationTokenResponse) MaxVersion() int16 { return 3 } +func (v *DescribeDelegationTokenResponse) SetVersion(version int16) { v.Version = version } +func (v *DescribeDelegationTokenResponse) GetVersion() int16 { return v.Version } +func (v *DescribeDelegationTokenResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *DescribeDelegationTokenResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 1 +} + +func (v *DescribeDelegationTokenResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *DescribeDelegationTokenResponse) RequestKind() Request { + return &DescribeDelegationTokenRequest{Version: v.Version} +} + +func (v *DescribeDelegationTokenResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.TokenDetails + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.PrincipalType + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.PrincipalName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 3 { + v := v.TokenRequesterPrincipalType + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 3 { + v := v.TokenRequesterPrincipalName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.IssueTimestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ExpiryTimestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.MaxTimestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.TokenID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.HMAC + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + { + v := v.Renewers + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.PrincipalType + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.PrincipalName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeDelegationTokenResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeDelegationTokenResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v 
*DescribeDelegationTokenResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + v := s.TokenDetails + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeDelegationTokenResponseTokenDetail, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.PrincipalType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.PrincipalName = v + } + if version >= 3 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.TokenRequesterPrincipalType = v + } + if version >= 3 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.TokenRequesterPrincipalName = v + } + { + v := b.Int64() + s.IssueTimestamp = v + } + { + v := b.Int64() + s.ExpiryTimestamp = v + } + { + v := b.Int64() + s.MaxTimestamp = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.TokenID = v + } + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.HMAC = v + } + { + v := s.Renewers + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeDelegationTokenResponseTokenDetailRenewer, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.PrincipalType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.PrincipalName = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Renewers = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.TokenDetails = v + } + { + v := b.Int32() + s.ThrottleMillis = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeDelegationTokenResponse returns a pointer to a default DescribeDelegationTokenResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. 
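A usage sketch for describing tokens, assuming a kmsg.Requestor such as a franz-go client: leaving Owners nil encodes a null array, which the broker treats as "all owners" per the request documentation above.

package main

import (
	"context"
	"fmt"

	"github.com/twmb/franz-go/pkg/kmsg"
)

// listAllTokens describes every delegation token visible to the caller and
// prints its ID, owner, and expiry timestamp.
func listAllTokens(ctx context.Context, r kmsg.Requestor) error {
	req := kmsg.NewPtrDescribeDelegationTokenRequest() // Owners left nil: all owners
	resp, err := req.RequestWith(ctx, r)
	if err != nil {
		return err
	}
	if resp.ErrorCode != 0 {
		return fmt.Errorf("describe failed with error code %d", resp.ErrorCode)
	}
	for _, t := range resp.TokenDetails {
		fmt.Printf("token %s owned by %s/%s expires at %d\n",
			t.TokenID, t.PrincipalType, t.PrincipalName, t.ExpiryTimestamp)
	}
	return nil
}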
+func NewPtrDescribeDelegationTokenResponse() *DescribeDelegationTokenResponse { + var v DescribeDelegationTokenResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeDelegationTokenResponse. +func (v *DescribeDelegationTokenResponse) Default() { +} + +// NewDescribeDelegationTokenResponse returns a default DescribeDelegationTokenResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeDelegationTokenResponse() DescribeDelegationTokenResponse { + var v DescribeDelegationTokenResponse + v.Default() + return v +} + +// DeleteGroupsRequest deletes consumer groups. This request was added for +// Kafka 1.1.0 corresponding to the removal of RetentionTimeMillis from +// OffsetCommitRequest. See KIP-229 for more details. +type DeleteGroupsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Groups is a list of groups to delete. + Groups []string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*DeleteGroupsRequest) Key() int16 { return 42 } +func (*DeleteGroupsRequest) MaxVersion() int16 { return 2 } +func (v *DeleteGroupsRequest) SetVersion(version int16) { v.Version = version } +func (v *DeleteGroupsRequest) GetVersion() int16 { return v.Version } +func (v *DeleteGroupsRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *DeleteGroupsRequest) IsGroupCoordinatorRequest() {} +func (v *DeleteGroupsRequest) ResponseKind() Response { + r := &DeleteGroupsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *DeleteGroupsRequest) RequestWith(ctx context.Context, r Requestor) (*DeleteGroupsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DeleteGroupsResponse) + return resp, err +} + +func (v *DeleteGroupsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.Groups + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DeleteGroupsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DeleteGroupsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DeleteGroupsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := s.Groups + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]string, l)...) 
+ } + for i := int32(0); i < l; i++ { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + a[i] = v + } + v = a + s.Groups = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDeleteGroupsRequest returns a pointer to a default DeleteGroupsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDeleteGroupsRequest() *DeleteGroupsRequest { + var v DeleteGroupsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteGroupsRequest. +func (v *DeleteGroupsRequest) Default() { +} + +// NewDeleteGroupsRequest returns a default DeleteGroupsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteGroupsRequest() DeleteGroupsRequest { + var v DeleteGroupsRequest + v.Default() + return v +} + +type DeleteGroupsResponseGroup struct { + // Group is a group ID requested for deletion. + Group string + + // ErrorCode is the error code returned for this group's deletion request. + // + // GROUP_AUTHORIZATION_FAILED is returned if the client is not authorized + // to delete a group. + // + // INVALID_GROUP_ID is returned if the requested group ID is invalid. + // + // COORDINATOR_NOT_AVAILABLE is returned if the coordinator for this + // group is not yet active. + // + // GROUP_ID_NOT_FOUND is returned if the group ID does not exist. + // + // NON_EMPTY_GROUP is returned if attempting to delete a group that is + // not in the empty state. + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteGroupsResponseGroup. +func (v *DeleteGroupsResponseGroup) Default() { +} + +// NewDeleteGroupsResponseGroup returns a default DeleteGroupsResponseGroup +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteGroupsResponseGroup() DeleteGroupsResponseGroup { + var v DeleteGroupsResponseGroup + v.Default() + return v +} + +// DeleteGroupsResponse is returned from a DeleteGroupsRequest. +type DeleteGroupsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // Groups are the responses to each group requested for deletion. + Groups []DeleteGroupsResponseGroup + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v2+ +} + +func (*DeleteGroupsResponse) Key() int16 { return 42 } +func (*DeleteGroupsResponse) MaxVersion() int16 { return 2 } +func (v *DeleteGroupsResponse) SetVersion(version int16) { v.Version = version } +func (v *DeleteGroupsResponse) GetVersion() int16 { return v.Version } +func (v *DeleteGroupsResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *DeleteGroupsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 1 } +func (v *DeleteGroupsResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *DeleteGroupsResponse) RequestKind() Request { return &DeleteGroupsRequest{Version: v.Version} } + +func (v *DeleteGroupsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Groups + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DeleteGroupsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DeleteGroupsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DeleteGroupsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Groups + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DeleteGroupsResponseGroup, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Group = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Groups = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDeleteGroupsResponse returns a pointer to a default DeleteGroupsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDeleteGroupsResponse() *DeleteGroupsResponse { + var v DeleteGroupsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteGroupsResponse. +func (v *DeleteGroupsResponse) Default() { +} + +// NewDeleteGroupsResponse returns a default DeleteGroupsResponse +// This is a shortcut for creating a struct and calling Default yourself. 
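A usage sketch for DeleteGroupsRequest, again assuming a kmsg.Requestor: the response carries one entry per requested group, so per-group error codes (for example NON_EMPTY_GROUP for a group that is not yet empty) have to be checked individually.

package main

import (
	"context"
	"fmt"

	"github.com/twmb/franz-go/pkg/kmsg"
)

// deleteGroups requests deletion of the given consumer groups and surfaces the
// first per-group error code, if any.
func deleteGroups(ctx context.Context, r kmsg.Requestor, groups ...string) error {
	req := kmsg.NewPtrDeleteGroupsRequest()
	req.Groups = groups
	resp, err := req.RequestWith(ctx, r)
	if err != nil {
		return err
	}
	for _, g := range resp.Groups {
		if g.ErrorCode != 0 {
			return fmt.Errorf("deleting group %q failed with error code %d", g.Group, g.ErrorCode)
		}
	}
	return nil
}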
+func NewDeleteGroupsResponse() DeleteGroupsResponse { + var v DeleteGroupsResponse + v.Default() + return v +} + +type ElectLeadersRequestTopic struct { + // Topic is a topic to trigger leader elections for (but only for the + // partitions below). + Topic string + + // Partitions is an array of partitions in a topic to trigger leader + // elections for. + Partitions []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ElectLeadersRequestTopic. +func (v *ElectLeadersRequestTopic) Default() { +} + +// NewElectLeadersRequestTopic returns a default ElectLeadersRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewElectLeadersRequestTopic() ElectLeadersRequestTopic { + var v ElectLeadersRequestTopic + v.Default() + return v +} + +// ElectLeadersRequest begins a leader election for all given topic +// partitions. This request was added in Kafka 2.2.0 to replace the zookeeper +// only option of triggering leader elections before. See KIP-183 for more +// details. KIP-460 introduced the ElectionType field with Kafka 2.4.0. +type ElectLeadersRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ElectionType is the type of election to conduct. 0 elects the preferred + // replica, 1 elects the first live replica if there are no in-sync replicas + // (i.e., unclean leader election). + ElectionType int8 // v1+ + + // Topics is an array of topics and corresponding partitions to + // trigger leader elections for, or null for all. + Topics []ElectLeadersRequestTopic + + // TimeoutMillis is how long Kafka can wait before responding to this request. + // This field has no effect on Kafka's processing of the request; the request + // will continue to be processed if the timeout is reached. If the timeout is + // reached, Kafka will reply with a REQUEST_TIMED_OUT error. + // + // This field has a default of 60000. + TimeoutMillis int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*ElectLeadersRequest) Key() int16 { return 43 } +func (*ElectLeadersRequest) MaxVersion() int16 { return 2 } +func (v *ElectLeadersRequest) SetVersion(version int16) { v.Version = version } +func (v *ElectLeadersRequest) GetVersion() int16 { return v.Version } +func (v *ElectLeadersRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *ElectLeadersRequest) Timeout() int32 { return v.TimeoutMillis } +func (v *ElectLeadersRequest) SetTimeout(timeoutMillis int32) { v.TimeoutMillis = timeoutMillis } +func (v *ElectLeadersRequest) IsAdminRequest() {} +func (v *ElectLeadersRequest) ResponseKind() Response { + r := &ElectLeadersResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
+func (v *ElectLeadersRequest) RequestWith(ctx context.Context, r Requestor) (*ElectLeadersResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*ElectLeadersResponse) + return resp, err +} + +func (v *ElectLeadersRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + if version >= 1 { + v := v.ElectionType + dst = kbin.AppendInt8(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.TimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ElectLeadersRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ElectLeadersRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ElectLeadersRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + if version >= 1 { + v := b.Int8() + s.ElectionType = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 0 || l == 0 { + a = []ElectLeadersRequestTopic{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ElectLeadersRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + { + v := b.Int32() + s.TimeoutMillis = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrElectLeadersRequest returns a pointer to a default ElectLeadersRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrElectLeadersRequest() *ElectLeadersRequest { + var v ElectLeadersRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ElectLeadersRequest. 
+func (v *ElectLeadersRequest) Default() { + v.TimeoutMillis = 60000 +} + +// NewElectLeadersRequest returns a default ElectLeadersRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewElectLeadersRequest() ElectLeadersRequest { + var v ElectLeadersRequest + v.Default() + return v +} + +type ElectLeadersResponseTopicPartition struct { + // Partition is the partition for this result. + Partition int32 + + // ErrorCode is the error code returned for this topic/partition leader + // election. + // + // CLUSTER_AUTHORIZATION_FAILED is returned if the client is not + // authorized to trigger leader elections. + // + // NOT_CONTROLLER is returned if the request was not issued to a Kafka + // controller. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the topic/partition does + // not exist on any broker in the cluster (this is slightly different + // from the usual meaning of a single broker not knowing of the topic + // partition). + // + // PREFERRED_LEADER_NOT_AVAILABLE is returned if the preferred leader + // could not be elected (for example, the preferred leader was not in + // the ISR). + ErrorCode int16 + + // ErrorMessage is an informative message if the leader election failed. + ErrorMessage *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ElectLeadersResponseTopicPartition. +func (v *ElectLeadersResponseTopicPartition) Default() { +} + +// NewElectLeadersResponseTopicPartition returns a default ElectLeadersResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewElectLeadersResponseTopicPartition() ElectLeadersResponseTopicPartition { + var v ElectLeadersResponseTopicPartition + v.Default() + return v +} + +type ElectLeadersResponseTopic struct { + // Topic is topic for the given partition results below. + Topic string + + // Partitions contains election results for a topic's partitions. + Partitions []ElectLeadersResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ElectLeadersResponseTopic. +func (v *ElectLeadersResponseTopic) Default() { +} + +// NewElectLeadersResponseTopic returns a default ElectLeadersResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewElectLeadersResponseTopic() ElectLeadersResponseTopic { + var v ElectLeadersResponseTopic + v.Default() + return v +} + +// ElectLeadersResponse is a response for an ElectLeadersRequest. +type ElectLeadersResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // ErrorCode is any error that applies to all partitions. + // + // CLUSTER_AUTHORIZATION_FAILED is returned if the client is not + // authorized to reassign partitions. + ErrorCode int16 // v1+ + + // Topics contains leader election results for each requested topic. + Topics []ElectLeadersResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
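A usage sketch for ElectLeadersRequest with an assumed kmsg.Requestor: ElectionType 0 asks for the preferred replica (unclean election would be 1), and leaving Topics nil would instead request an election for all partitions, per the comments above.

package main

import (
	"context"
	"fmt"

	"github.com/twmb/franz-go/pkg/kmsg"
)

// electPreferredLeaders triggers a preferred-replica election for the given
// partitions of a single topic and reports the first per-partition failure.
func electPreferredLeaders(ctx context.Context, r kmsg.Requestor, topic string, partitions ...int32) error {
	req := kmsg.NewPtrElectLeadersRequest() // TimeoutMillis defaults to 60000
	req.ElectionType = 0                    // preferred replica
	t := kmsg.NewElectLeadersRequestTopic()
	t.Topic = topic
	t.Partitions = partitions
	req.Topics = append(req.Topics, t)

	resp, err := req.RequestWith(ctx, r)
	if err != nil {
		return err
	}
	for _, rt := range resp.Topics {
		for _, p := range rt.Partitions {
			if p.ErrorCode != 0 {
				return fmt.Errorf("election for %s[%d] failed with error code %d",
					rt.Topic, p.Partition, p.ErrorCode)
			}
		}
	}
	return nil
}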
+ UnknownTags Tags // v2+ +} + +func (*ElectLeadersResponse) Key() int16 { return 43 } +func (*ElectLeadersResponse) MaxVersion() int16 { return 2 } +func (v *ElectLeadersResponse) SetVersion(version int16) { v.Version = version } +func (v *ElectLeadersResponse) GetVersion() int16 { return v.Version } +func (v *ElectLeadersResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *ElectLeadersResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 0 } +func (v *ElectLeadersResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *ElectLeadersResponse) RequestKind() Request { return &ElectLeadersRequest{Version: v.Version} } + +func (v *ElectLeadersResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + if version >= 1 { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ElectLeadersResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ElectLeadersResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ElectLeadersResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + if version >= 1 { + v := b.Int16() + s.ErrorCode = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ElectLeadersResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ElectLeadersResponseTopicPartition, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrElectLeadersResponse returns a pointer to a default ElectLeadersResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrElectLeadersResponse() *ElectLeadersResponse { + var v ElectLeadersResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ElectLeadersResponse. +func (v *ElectLeadersResponse) Default() { +} + +// NewElectLeadersResponse returns a default ElectLeadersResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewElectLeadersResponse() ElectLeadersResponse { + var v ElectLeadersResponse + v.Default() + return v +} + +type IncrementalAlterConfigsRequestResourceConfig struct { + // Name is a key to modify (e.g. segment.bytes). + // + // For broker loggers, see KIP-412 section "Request/Response Overview" + // for details on how to change per logger log levels. + Name string + + // Op is the type of operation to perform for this config name. + // + // SET (0) is to set a configuration value; the value must not be null. + // + // DELETE (1) is to delete a configuration key. + // + // APPEND (2) is to add a value to the list of values for a key (if the + // key is for a list of values). + // + // SUBTRACT (3) is to remove a value from a list of values (if the key + // is for a list of values). + Op IncrementalAlterConfigOp + + // Value is a value to set for the key (e.g. 10). + Value *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to IncrementalAlterConfigsRequestResourceConfig. +func (v *IncrementalAlterConfigsRequestResourceConfig) Default() { +} + +// NewIncrementalAlterConfigsRequestResourceConfig returns a default IncrementalAlterConfigsRequestResourceConfig +// This is a shortcut for creating a struct and calling Default yourself. +func NewIncrementalAlterConfigsRequestResourceConfig() IncrementalAlterConfigsRequestResourceConfig { + var v IncrementalAlterConfigsRequestResourceConfig + v.Default() + return v +} + +type IncrementalAlterConfigsRequestResource struct { + // ResourceType is an enum corresponding to the type of config to alter. + ResourceType ConfigResourceType + + // ResourceName is the name of config to alter. + // + // If the requested type is a topic, this corresponds to a topic name. + // + // If the requested type if a broker, this should either be empty or be + // the ID of the broker this request is issued to. If it is empty, this + // updates all broker configs. If a specific ID, this updates just the + // broker. 
Using a specific ID also ensures that brokers reload config + // or secret files even if the file path has not changed. Lastly, password + // config options can only be defined on a per broker basis. + // + // If the type is broker logger, this must be a broker ID. + ResourceName string + + // Configs contains key/value config pairs to set on the resource. + Configs []IncrementalAlterConfigsRequestResourceConfig + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to IncrementalAlterConfigsRequestResource. +func (v *IncrementalAlterConfigsRequestResource) Default() { +} + +// NewIncrementalAlterConfigsRequestResource returns a default IncrementalAlterConfigsRequestResource +// This is a shortcut for creating a struct and calling Default yourself. +func NewIncrementalAlterConfigsRequestResource() IncrementalAlterConfigsRequestResource { + var v IncrementalAlterConfigsRequestResource + v.Default() + return v +} + +// IncrementalAlterConfigsRequest issues ar equest to alter either topic or +// broker configs. +// +// This API was added in Kafka 2.3.0 to replace AlterConfigs. The key benefit +// of this API is that consumers do not need to know the full config state +// to add or remove new config options. See KIP-339 for more details. +type IncrementalAlterConfigsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Resources is an array of configs to alter. + Resources []IncrementalAlterConfigsRequestResource + + // ValidateOnly validates the request but does not apply it. + ValidateOnly bool + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +func (*IncrementalAlterConfigsRequest) Key() int16 { return 44 } +func (*IncrementalAlterConfigsRequest) MaxVersion() int16 { return 1 } +func (v *IncrementalAlterConfigsRequest) SetVersion(version int16) { v.Version = version } +func (v *IncrementalAlterConfigsRequest) GetVersion() int16 { return v.Version } +func (v *IncrementalAlterConfigsRequest) IsFlexible() bool { return v.Version >= 1 } +func (v *IncrementalAlterConfigsRequest) ResponseKind() Response { + r := &IncrementalAlterConfigsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
+func (v *IncrementalAlterConfigsRequest) RequestWith(ctx context.Context, r Requestor) (*IncrementalAlterConfigsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*IncrementalAlterConfigsResponse) + return resp, err +} + +func (v *IncrementalAlterConfigsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + { + v := v.Resources + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ResourceType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.ResourceName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Configs + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Op + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.Value + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.ValidateOnly + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *IncrementalAlterConfigsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *IncrementalAlterConfigsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *IncrementalAlterConfigsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + s := v + { + v := s.Resources + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]IncrementalAlterConfigsRequestResource, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var t ConfigResourceType + { + v := b.Int8() + t = ConfigResourceType(v) + } + v := t + s.ResourceType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ResourceName = v + } + { + v := s.Configs + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]IncrementalAlterConfigsRequestResourceConfig, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + { + var t IncrementalAlterConfigOp + { + v := b.Int8() + t = IncrementalAlterConfigOp(v) + } + v := t + s.Op = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Value = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Configs = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Resources = v + } + { + v := b.Bool() + s.ValidateOnly = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrIncrementalAlterConfigsRequest returns a pointer to a default IncrementalAlterConfigsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrIncrementalAlterConfigsRequest() *IncrementalAlterConfigsRequest { + var v IncrementalAlterConfigsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to IncrementalAlterConfigsRequest. +func (v *IncrementalAlterConfigsRequest) Default() { +} + +// NewIncrementalAlterConfigsRequest returns a default IncrementalAlterConfigsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewIncrementalAlterConfigsRequest() IncrementalAlterConfigsRequest { + var v IncrementalAlterConfigsRequest + v.Default() + return v +} + +type IncrementalAlterConfigsResponseResource struct { + // ErrorCode is the error code returned for incrementally altering configs. + // + // CLUSTER_AUTHORIZATION_FAILED is returned if asking to alter broker + // configs but the client is not authorized to do so. + // + // TOPIC_AUTHORIZATION_FAILED is returned if asking to alter topic + // configs but the client is not authorized to do so. + // + // INVALID_TOPIC_EXCEPTION is returned if the requested topic was invalid. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the broker does not know of + // the requested topic. + // + // INVALID_REQUEST is returned if the requested config is invalid or if + // asking Kafka to alter an invalid resource. + ErrorCode int16 + + // ErrorMessage is an informative message if the incremental alter config failed. + ErrorMessage *string + + // ResourceType is the enum corresponding to the type of altered config. + ResourceType ConfigResourceType + + // ResourceName is the name corresponding to the incremental alter config + // request. + ResourceName string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to IncrementalAlterConfigsResponseResource. +func (v *IncrementalAlterConfigsResponseResource) Default() { +} + +// NewIncrementalAlterConfigsResponseResource returns a default IncrementalAlterConfigsResponseResource +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewIncrementalAlterConfigsResponseResource() IncrementalAlterConfigsResponseResource { + var v IncrementalAlterConfigsResponseResource + v.Default() + return v +} + +// IncrementalAlterConfigsResponse is returned from an IncrementalAlterConfigsRequest. +type IncrementalAlterConfigsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // Resources are responses for each resources in the alter request. + Resources []IncrementalAlterConfigsResponseResource + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +func (*IncrementalAlterConfigsResponse) Key() int16 { return 44 } +func (*IncrementalAlterConfigsResponse) MaxVersion() int16 { return 1 } +func (v *IncrementalAlterConfigsResponse) SetVersion(version int16) { v.Version = version } +func (v *IncrementalAlterConfigsResponse) GetVersion() int16 { return v.Version } +func (v *IncrementalAlterConfigsResponse) IsFlexible() bool { return v.Version >= 1 } +func (v *IncrementalAlterConfigsResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 0 +} + +func (v *IncrementalAlterConfigsResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *IncrementalAlterConfigsResponse) RequestKind() Request { + return &IncrementalAlterConfigsRequest{Version: v.Version} +} + +func (v *IncrementalAlterConfigsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Resources + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.ResourceType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.ResourceName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *IncrementalAlterConfigsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *IncrementalAlterConfigsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *IncrementalAlterConfigsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Resources + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]IncrementalAlterConfigsResponseResource, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + var t ConfigResourceType + { + v := b.Int8() + t = ConfigResourceType(v) + } + v := t + s.ResourceType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ResourceName = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Resources = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrIncrementalAlterConfigsResponse returns a pointer to a default IncrementalAlterConfigsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrIncrementalAlterConfigsResponse() *IncrementalAlterConfigsResponse { + var v IncrementalAlterConfigsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to IncrementalAlterConfigsResponse. +func (v *IncrementalAlterConfigsResponse) Default() { +} + +// NewIncrementalAlterConfigsResponse returns a default IncrementalAlterConfigsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewIncrementalAlterConfigsResponse() IncrementalAlterConfigsResponse { + var v IncrementalAlterConfigsResponse + v.Default() + return v +} + +type AlterPartitionAssignmentsRequestTopicPartition struct { + // Partition is a partition to reassign. + Partition int32 + + // Replicas are replicas to place the partition on, or null to + // cancel a pending reassignment of this partition. + Replicas []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterPartitionAssignmentsRequestTopicPartition. +func (v *AlterPartitionAssignmentsRequestTopicPartition) Default() { +} + +// NewAlterPartitionAssignmentsRequestTopicPartition returns a default AlterPartitionAssignmentsRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterPartitionAssignmentsRequestTopicPartition() AlterPartitionAssignmentsRequestTopicPartition { + var v AlterPartitionAssignmentsRequestTopicPartition + v.Default() + return v +} + +type AlterPartitionAssignmentsRequestTopic struct { + // Topic is a topic to reassign the partitions of. + Topic string + + // Partitions contains partitions to reassign. + Partitions []AlterPartitionAssignmentsRequestTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterPartitionAssignmentsRequestTopic. +func (v *AlterPartitionAssignmentsRequestTopic) Default() { +} + +// NewAlterPartitionAssignmentsRequestTopic returns a default AlterPartitionAssignmentsRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewAlterPartitionAssignmentsRequestTopic() AlterPartitionAssignmentsRequestTopic { + var v AlterPartitionAssignmentsRequestTopic + v.Default() + return v +} + +// AlterPartitionAssignmentsRequest, proposed in KIP-455 and implemented in +// Kafka 2.4.0, is a request to reassign partitions to certain brokers. +// +// ACL wise, this requires ALTER on CLUSTER. +type AlterPartitionAssignmentsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // TimeoutMillis is how long Kafka can wait before responding to this request. + // This field has no effect on Kafka's processing of the request; the request + // will continue to be processed if the timeout is reached. If the timeout is + // reached, Kafka will reply with a REQUEST_TIMED_OUT error. + // + // This field has a default of 60000. + TimeoutMillis int32 + + // Topics are topics for which to reassign partitions of. + Topics []AlterPartitionAssignmentsRequestTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*AlterPartitionAssignmentsRequest) Key() int16 { return 45 } +func (*AlterPartitionAssignmentsRequest) MaxVersion() int16 { return 0 } +func (v *AlterPartitionAssignmentsRequest) SetVersion(version int16) { v.Version = version } +func (v *AlterPartitionAssignmentsRequest) GetVersion() int16 { return v.Version } +func (v *AlterPartitionAssignmentsRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *AlterPartitionAssignmentsRequest) Timeout() int32 { return v.TimeoutMillis } +func (v *AlterPartitionAssignmentsRequest) SetTimeout(timeoutMillis int32) { + v.TimeoutMillis = timeoutMillis +} +func (v *AlterPartitionAssignmentsRequest) IsAdminRequest() {} +func (v *AlterPartitionAssignmentsRequest) ResponseKind() Response { + r := &AlterPartitionAssignmentsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
+func (v *AlterPartitionAssignmentsRequest) RequestWith(ctx context.Context, r Requestor) (*AlterPartitionAssignmentsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*AlterPartitionAssignmentsResponse) + return resp, err +} + +func (v *AlterPartitionAssignmentsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.TimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Replicas + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AlterPartitionAssignmentsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AlterPartitionAssignmentsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AlterPartitionAssignmentsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.TimeoutMillis = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterPartitionAssignmentsRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterPartitionAssignmentsRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := s.Replicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 0 || l == 0 { + a = []int32{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Replicas = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAlterPartitionAssignmentsRequest returns a pointer to a default AlterPartitionAssignmentsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAlterPartitionAssignmentsRequest() *AlterPartitionAssignmentsRequest { + var v AlterPartitionAssignmentsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterPartitionAssignmentsRequest. +func (v *AlterPartitionAssignmentsRequest) Default() { + v.TimeoutMillis = 60000 +} + +// NewAlterPartitionAssignmentsRequest returns a default AlterPartitionAssignmentsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterPartitionAssignmentsRequest() AlterPartitionAssignmentsRequest { + var v AlterPartitionAssignmentsRequest + v.Default() + return v +} + +type AlterPartitionAssignmentsResponseTopicPartition struct { + // Partition is the partition being responded to. + Partition int32 + + // ErrorCode is the error code returned for partition reassignments. + // + // REQUEST_TIMED_OUT is returned if the request timed out. + // + // NOT_CONTROLLER is returned if the request was not issued to a Kafka + // controller. + // + // CLUSTER_AUTHORIZATION_FAILED is returned if the client is not + // authorized to reassign partitions. + // + // NO_REASSIGNMENT_IN_PROGRESS is returned for partition reassignment + // cancellations when the partition was not being reassigned. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the broker does not know of + // the requested topic or the topic is being deleted. + ErrorCode int16 + + // ErrorMessage is an informative message if the partition reassignment failed. + ErrorMessage *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterPartitionAssignmentsResponseTopicPartition. +func (v *AlterPartitionAssignmentsResponseTopicPartition) Default() { +} + +// NewAlterPartitionAssignmentsResponseTopicPartition returns a default AlterPartitionAssignmentsResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterPartitionAssignmentsResponseTopicPartition() AlterPartitionAssignmentsResponseTopicPartition { + var v AlterPartitionAssignmentsResponseTopicPartition + v.Default() + return v +} + +type AlterPartitionAssignmentsResponseTopic struct { + // Topic is the topic being responded to. + Topic string + + // Partitions contains responses for partitions. + Partitions []AlterPartitionAssignmentsResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterPartitionAssignmentsResponseTopic. 
+func (v *AlterPartitionAssignmentsResponseTopic) Default() { +} + +// NewAlterPartitionAssignmentsResponseTopic returns a default AlterPartitionAssignmentsResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterPartitionAssignmentsResponseTopic() AlterPartitionAssignmentsResponseTopic { + var v AlterPartitionAssignmentsResponseTopic + v.Default() + return v +} + +// AlterPartitionAssignmentsResponse is returned for an AlterPartitionAssignmentsRequest. +type AlterPartitionAssignmentsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // ErrorCode is any global (applied to all partitions) error code. + ErrorCode int16 + + // ErrorMessage is any global (applied to all partitions) error message. + ErrorMessage *string + + // Topics contains responses for each topic requested. + Topics []AlterPartitionAssignmentsResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*AlterPartitionAssignmentsResponse) Key() int16 { return 45 } +func (*AlterPartitionAssignmentsResponse) MaxVersion() int16 { return 0 } +func (v *AlterPartitionAssignmentsResponse) SetVersion(version int16) { v.Version = version } +func (v *AlterPartitionAssignmentsResponse) GetVersion() int16 { return v.Version } +func (v *AlterPartitionAssignmentsResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *AlterPartitionAssignmentsResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 0 +} + +func (v *AlterPartitionAssignmentsResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *AlterPartitionAssignmentsResponse) RequestKind() Request { + return &AlterPartitionAssignmentsRequest{Version: v.Version} +} + +func (v *AlterPartitionAssignmentsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + 
return dst +} + +func (v *AlterPartitionAssignmentsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AlterPartitionAssignmentsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AlterPartitionAssignmentsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterPartitionAssignmentsResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterPartitionAssignmentsResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAlterPartitionAssignmentsResponse returns a pointer to a default AlterPartitionAssignmentsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAlterPartitionAssignmentsResponse() *AlterPartitionAssignmentsResponse { + var v AlterPartitionAssignmentsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterPartitionAssignmentsResponse. +func (v *AlterPartitionAssignmentsResponse) Default() { +} + +// NewAlterPartitionAssignmentsResponse returns a default AlterPartitionAssignmentsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterPartitionAssignmentsResponse() AlterPartitionAssignmentsResponse { + var v AlterPartitionAssignmentsResponse + v.Default() + return v +} + +type ListPartitionReassignmentsRequestTopic struct { + // Topic is a topic to list in progress partition reassingments of. + Topic string + + // Partitions are partitions to list in progress reassignments of. + Partitions []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListPartitionReassignmentsRequestTopic. +func (v *ListPartitionReassignmentsRequestTopic) Default() { +} + +// NewListPartitionReassignmentsRequestTopic returns a default ListPartitionReassignmentsRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewListPartitionReassignmentsRequestTopic() ListPartitionReassignmentsRequestTopic { + var v ListPartitionReassignmentsRequestTopic + v.Default() + return v +} + +// ListPartitionReassignmentsRequest, proposed in KIP-455 and implemented in +// Kafka 2.4.0, is a request to list in progress partition reassignments. +// +// ACL wise, this requires DESCRIBE on CLUSTER. +type ListPartitionReassignmentsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // TimeoutMillis is how long Kafka can wait before responding to this request. + // This field has no effect on Kafka's processing of the request; the request + // will continue to be processed if the timeout is reached. If the timeout is + // reached, Kafka will reply with a REQUEST_TIMED_OUT error. + // + // This field has a default of 60000. + TimeoutMillis int32 + + // Topics are topics to list in progress partition reassignments of, or null + // to list everything. + Topics []ListPartitionReassignmentsRequestTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*ListPartitionReassignmentsRequest) Key() int16 { return 46 } +func (*ListPartitionReassignmentsRequest) MaxVersion() int16 { return 0 } +func (v *ListPartitionReassignmentsRequest) SetVersion(version int16) { v.Version = version } +func (v *ListPartitionReassignmentsRequest) GetVersion() int16 { return v.Version } +func (v *ListPartitionReassignmentsRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *ListPartitionReassignmentsRequest) Timeout() int32 { return v.TimeoutMillis } +func (v *ListPartitionReassignmentsRequest) SetTimeout(timeoutMillis int32) { + v.TimeoutMillis = timeoutMillis +} +func (v *ListPartitionReassignmentsRequest) IsAdminRequest() {} +func (v *ListPartitionReassignmentsRequest) ResponseKind() Response { + r := &ListPartitionReassignmentsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
+func (v *ListPartitionReassignmentsRequest) RequestWith(ctx context.Context, r Requestor) (*ListPartitionReassignmentsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*ListPartitionReassignmentsResponse) + return resp, err +} + +func (v *ListPartitionReassignmentsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.TimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ListPartitionReassignmentsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ListPartitionReassignmentsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ListPartitionReassignmentsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.TimeoutMillis = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 0 || l == 0 { + a = []ListPartitionReassignmentsRequestTopic{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ListPartitionReassignmentsRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrListPartitionReassignmentsRequest returns a pointer to a default ListPartitionReassignmentsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrListPartitionReassignmentsRequest() *ListPartitionReassignmentsRequest { + var v ListPartitionReassignmentsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListPartitionReassignmentsRequest. 
+func (v *ListPartitionReassignmentsRequest) Default() { + v.TimeoutMillis = 60000 +} + +// NewListPartitionReassignmentsRequest returns a default ListPartitionReassignmentsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewListPartitionReassignmentsRequest() ListPartitionReassignmentsRequest { + var v ListPartitionReassignmentsRequest + v.Default() + return v +} + +type ListPartitionReassignmentsResponseTopicPartition struct { + // Partition is the partition being responded to. + Partition int32 + + // Replicas is the partition's current replicas. + Replicas []int32 + + // AddingReplicas are replicas currently being added to the partition. + AddingReplicas []int32 + + // RemovingReplicas are replicas currently being removed from the partition. + RemovingReplicas []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListPartitionReassignmentsResponseTopicPartition. +func (v *ListPartitionReassignmentsResponseTopicPartition) Default() { +} + +// NewListPartitionReassignmentsResponseTopicPartition returns a default ListPartitionReassignmentsResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewListPartitionReassignmentsResponseTopicPartition() ListPartitionReassignmentsResponseTopicPartition { + var v ListPartitionReassignmentsResponseTopicPartition + v.Default() + return v +} + +type ListPartitionReassignmentsResponseTopic struct { + // Topic is the topic being responded to. + Topic string + + // Partitions contains responses for partitions. + Partitions []ListPartitionReassignmentsResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListPartitionReassignmentsResponseTopic. +func (v *ListPartitionReassignmentsResponseTopic) Default() { +} + +// NewListPartitionReassignmentsResponseTopic returns a default ListPartitionReassignmentsResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewListPartitionReassignmentsResponseTopic() ListPartitionReassignmentsResponseTopic { + var v ListPartitionReassignmentsResponseTopic + v.Default() + return v +} + +// ListPartitionReassignmentsResponse is returned for a ListPartitionReassignmentsRequest. +type ListPartitionReassignmentsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // ErrorCode is the error code returned for listing reassignments. + // + // REQUEST_TIMED_OUT is returned if the request timed out. + // + // NOT_CONTROLLER is returned if the request was not issued to a Kafka + // controller. + // + // CLUSTER_AUTHORIZATION_FAILED is returned if the client is not + // authorized to reassign partitions. + ErrorCode int16 + + // ErrorMessage is any global (applied to all partitions) error message. + ErrorMessage *string + + // Topics contains responses for each topic requested. + Topics []ListPartitionReassignmentsResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags +} + +func (*ListPartitionReassignmentsResponse) Key() int16 { return 46 } +func (*ListPartitionReassignmentsResponse) MaxVersion() int16 { return 0 } +func (v *ListPartitionReassignmentsResponse) SetVersion(version int16) { v.Version = version } +func (v *ListPartitionReassignmentsResponse) GetVersion() int16 { return v.Version } +func (v *ListPartitionReassignmentsResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *ListPartitionReassignmentsResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 0 +} + +func (v *ListPartitionReassignmentsResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *ListPartitionReassignmentsResponse) RequestKind() Request { + return &ListPartitionReassignmentsRequest{Version: v.Version} +} + +func (v *ListPartitionReassignmentsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Replicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + { + v := v.AddingReplicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + { + v := v.RemovingReplicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ListPartitionReassignmentsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ListPartitionReassignmentsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ListPartitionReassignmentsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = 
b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ListPartitionReassignmentsResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ListPartitionReassignmentsResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := s.Replicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Replicas = v + } + { + v := s.AddingReplicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.AddingReplicas = v + } + { + v := s.RemovingReplicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.RemovingReplicas = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrListPartitionReassignmentsResponse returns a pointer to a default ListPartitionReassignmentsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrListPartitionReassignmentsResponse() *ListPartitionReassignmentsResponse { + var v ListPartitionReassignmentsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListPartitionReassignmentsResponse. +func (v *ListPartitionReassignmentsResponse) Default() { +} + +// NewListPartitionReassignmentsResponse returns a default ListPartitionReassignmentsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewListPartitionReassignmentsResponse() ListPartitionReassignmentsResponse { + var v ListPartitionReassignmentsResponse + v.Default() + return v +} + +type OffsetDeleteRequestTopicPartition struct { + // Partition is a partition to delete offsets for. + Partition int32 +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetDeleteRequestTopicPartition. 
+func (v *OffsetDeleteRequestTopicPartition) Default() { +} + +// NewOffsetDeleteRequestTopicPartition returns a default OffsetDeleteRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetDeleteRequestTopicPartition() OffsetDeleteRequestTopicPartition { + var v OffsetDeleteRequestTopicPartition + v.Default() + return v +} + +type OffsetDeleteRequestTopic struct { + // Topic is a topic to delete offsets in. + Topic string + + // Partitions are partitions to delete offsets for. + Partitions []OffsetDeleteRequestTopicPartition +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetDeleteRequestTopic. +func (v *OffsetDeleteRequestTopic) Default() { +} + +// NewOffsetDeleteRequestTopic returns a default OffsetDeleteRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetDeleteRequestTopic() OffsetDeleteRequestTopic { + var v OffsetDeleteRequestTopic + v.Default() + return v +} + +// OffsetDeleteRequest, proposed in KIP-496 and implemented in Kafka 2.4.0, is +// a request to delete group offsets. +// +// ACL wise, this requires DELETE on GROUP for the group and READ on TOPIC for +// each topic. +type OffsetDeleteRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Group is the group to delete offsets in. + Group string + + // Topics are topics to delete offsets in. + Topics []OffsetDeleteRequestTopic +} + +func (*OffsetDeleteRequest) Key() int16 { return 47 } +func (*OffsetDeleteRequest) MaxVersion() int16 { return 0 } +func (v *OffsetDeleteRequest) SetVersion(version int16) { v.Version = version } +func (v *OffsetDeleteRequest) GetVersion() int16 { return v.Version } +func (v *OffsetDeleteRequest) IsFlexible() bool { return false } +func (v *OffsetDeleteRequest) IsGroupCoordinatorRequest() {} +func (v *OffsetDeleteRequest) ResponseKind() Response { + r := &OffsetDeleteResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
+func (v *OffsetDeleteRequest) RequestWith(ctx context.Context, r Requestor) (*OffsetDeleteResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*OffsetDeleteResponse) + return resp, err +} + +func (v *OffsetDeleteRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.Group + dst = kbin.AppendString(dst, v) + } + { + v := v.Topics + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Topic + dst = kbin.AppendString(dst, v) + } + { + v := v.Partitions + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + } + } + } + } + return dst +} + +func (v *OffsetDeleteRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *OffsetDeleteRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *OffsetDeleteRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Group = v + } + { + v := s.Topics + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetDeleteRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetDeleteRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + } + v = a + s.Partitions = v + } + } + v = a + s.Topics = v + } + return b.Complete() +} + +// NewPtrOffsetDeleteRequest returns a pointer to a default OffsetDeleteRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrOffsetDeleteRequest() *OffsetDeleteRequest { + var v OffsetDeleteRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetDeleteRequest. +func (v *OffsetDeleteRequest) Default() { +} + +// NewOffsetDeleteRequest returns a default OffsetDeleteRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetDeleteRequest() OffsetDeleteRequest { + var v OffsetDeleteRequest + v.Default() + return v +} + +type OffsetDeleteResponseTopicPartition struct { + // Partition is the partition being responded to. + Partition int32 + + // ErrorCode is any per partition error code. + // + // TOPIC_AUTHORIZATION_FAILED is returned if the client is not authorized + // for the topic / partition. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the broker does not know of + // the requested topic. + // + // GROUP_SUBSCRIBED_TO_TOPIC is returned if the topic is still subscribed to. + ErrorCode int16 +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetDeleteResponseTopicPartition. +func (v *OffsetDeleteResponseTopicPartition) Default() { +} + +// NewOffsetDeleteResponseTopicPartition returns a default OffsetDeleteResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewOffsetDeleteResponseTopicPartition() OffsetDeleteResponseTopicPartition {
+ var v OffsetDeleteResponseTopicPartition
+ v.Default()
+ return v
+}
+
+type OffsetDeleteResponseTopic struct {
+ // Topic is the topic being responded to.
+ Topic string
+
+ // Partitions are partitions being responded to.
+ Partitions []OffsetDeleteResponseTopicPartition
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to OffsetDeleteResponseTopic.
+func (v *OffsetDeleteResponseTopic) Default() {
+}
+
+// NewOffsetDeleteResponseTopic returns a default OffsetDeleteResponseTopic
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewOffsetDeleteResponseTopic() OffsetDeleteResponseTopic {
+ var v OffsetDeleteResponseTopic
+ v.Default()
+ return v
+}
+
+// OffsetDeleteResponse is a response to an offset delete request.
+type OffsetDeleteResponse struct {
+ // Version is the version of this message used with a Kafka broker.
+ Version int16
+
+ // ErrorCode is any group wide error.
+ //
+ // GROUP_AUTHORIZATION_FAILED is returned if the client is not authorized
+ // for the group.
+ //
+ // INVALID_GROUP_ID is returned if the requested group ID is invalid.
+ //
+ // COORDINATOR_NOT_AVAILABLE is returned if the coordinator is not available.
+ //
+ // COORDINATOR_LOAD_IN_PROGRESS is returned if the group is loading.
+ //
+ // NOT_COORDINATOR is returned if the requested broker is not the coordinator
+ // for the requested group.
+ //
+ // GROUP_ID_NOT_FOUND is returned if the group ID does not exist.
+ ErrorCode int16
+
+ // ThrottleMillis is how long of a throttle Kafka will apply to the client
+ // after responding to this request.
+ ThrottleMillis int32
+
+ // Topics are responses to requested topics.
+ Topics []OffsetDeleteResponseTopic +} + +func (*OffsetDeleteResponse) Key() int16 { return 47 } +func (*OffsetDeleteResponse) MaxVersion() int16 { return 0 } +func (v *OffsetDeleteResponse) SetVersion(version int16) { v.Version = version } +func (v *OffsetDeleteResponse) GetVersion() int16 { return v.Version } +func (v *OffsetDeleteResponse) IsFlexible() bool { return false } +func (v *OffsetDeleteResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 0 } +func (v *OffsetDeleteResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *OffsetDeleteResponse) RequestKind() Request { return &OffsetDeleteRequest{Version: v.Version} } + +func (v *OffsetDeleteResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Topic + dst = kbin.AppendString(dst, v) + } + { + v := v.Partitions + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + } + } + } + } + return dst +} + +func (v *OffsetDeleteResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *OffsetDeleteResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *OffsetDeleteResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Topics + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetDeleteResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetDeleteResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + } + v = a + s.Partitions = v + } + } + v = a + s.Topics = v + } + return b.Complete() +} + +// NewPtrOffsetDeleteResponse returns a pointer to a default OffsetDeleteResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrOffsetDeleteResponse() *OffsetDeleteResponse { + var v OffsetDeleteResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetDeleteResponse. +func (v *OffsetDeleteResponse) Default() { +} + +// NewOffsetDeleteResponse returns a default OffsetDeleteResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetDeleteResponse() OffsetDeleteResponse { + var v OffsetDeleteResponse + v.Default() + return v +} + +type DescribeClientQuotasRequestComponent struct { + // EntityType is the entity component type that this filter component + // applies to; some possible values are "user" or "client-id". 
+ EntityType string + + // MatchType specifies how to match an entity, + // with 0 meaning match on the name exactly, + // 1 meaning match on the default name, + // and 2 meaning any specified name. + MatchType QuotasMatchType + + // Match is the string to match against, or null if unused for the given + // match type. + Match *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeClientQuotasRequestComponent. +func (v *DescribeClientQuotasRequestComponent) Default() { +} + +// NewDescribeClientQuotasRequestComponent returns a default DescribeClientQuotasRequestComponent +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeClientQuotasRequestComponent() DescribeClientQuotasRequestComponent { + var v DescribeClientQuotasRequestComponent + v.Default() + return v +} + +// DescribeClientQuotasRequest, proposed in KIP-546 and introduced with Kafka 2.6.0, +// provides a way to describe client quotas. +type DescribeClientQuotasRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Components is a list of match filters to apply for describing quota entities. + Components []DescribeClientQuotasRequestComponent + + // Strict signifies whether matches are strict; if true, the response + // excludes entities with unspecified entity types. + Strict bool + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +func (*DescribeClientQuotasRequest) Key() int16 { return 48 } +func (*DescribeClientQuotasRequest) MaxVersion() int16 { return 1 } +func (v *DescribeClientQuotasRequest) SetVersion(version int16) { v.Version = version } +func (v *DescribeClientQuotasRequest) GetVersion() int16 { return v.Version } +func (v *DescribeClientQuotasRequest) IsFlexible() bool { return v.Version >= 1 } +func (v *DescribeClientQuotasRequest) ResponseKind() Response { + r := &DescribeClientQuotasResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
+func (v *DescribeClientQuotasRequest) RequestWith(ctx context.Context, r Requestor) (*DescribeClientQuotasResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DescribeClientQuotasResponse) + return resp, err +} + +func (v *DescribeClientQuotasRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + { + v := v.Components + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.EntityType + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.MatchType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.Match + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.Strict + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeClientQuotasRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeClientQuotasRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeClientQuotasRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + s := v + { + v := s.Components + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeClientQuotasRequestComponent, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.EntityType = v + } + { + var t QuotasMatchType + { + v := b.Int8() + t = QuotasMatchType(v) + } + v := t + s.MatchType = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Match = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Components = v + } + { + v := b.Bool() + s.Strict = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeClientQuotasRequest returns a pointer to a default DescribeClientQuotasRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeClientQuotasRequest() *DescribeClientQuotasRequest { + var v DescribeClientQuotasRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeClientQuotasRequest. +func (v *DescribeClientQuotasRequest) Default() { +} + +// NewDescribeClientQuotasRequest returns a default DescribeClientQuotasRequest +// This is a shortcut for creating a struct and calling Default yourself. 
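Editor's note, not part of the vendored kmsg file: the sketch below shows how the generated constructors and AppendTo defined above could be combined to serialize a DescribeClientQuotasRequest body. The helper name buildDescribeQuotas and the user name are made up for illustration, and the caller is assumed to prepend the normal Kafka request header (api key 48, version, correlation id, client id) itself.

package main

import (
	"fmt"

	"github.com/twmb/franz-go/pkg/kmsg"
)

// buildDescribeQuotas serializes a DescribeClientQuotasRequest body that asks
// for the quotas of one named user.
func buildDescribeQuotas(user string) []byte {
	req := kmsg.NewPtrDescribeClientQuotasRequest()
	req.SetVersion(1) // v1+ uses the flexible (compact) encoding

	comp := kmsg.NewDescribeClientQuotasRequestComponent()
	comp.EntityType = "user"
	comp.MatchType = 0 // 0 = match this exact entity name
	comp.Match = &user
	req.Components = append(req.Components, comp)

	return req.AppendTo(nil)
}

func main() {
	fmt.Printf("DescribeClientQuotas body: % x\n", buildDescribeQuotas("alice"))
}
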
+func NewDescribeClientQuotasRequest() DescribeClientQuotasRequest { + var v DescribeClientQuotasRequest + v.Default() + return v +} + +type DescribeClientQuotasResponseEntryEntity struct { + // Type is the entity type. + Type string + + // Name is the entity name, or null if the default. + Name *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeClientQuotasResponseEntryEntity. +func (v *DescribeClientQuotasResponseEntryEntity) Default() { +} + +// NewDescribeClientQuotasResponseEntryEntity returns a default DescribeClientQuotasResponseEntryEntity +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeClientQuotasResponseEntryEntity() DescribeClientQuotasResponseEntryEntity { + var v DescribeClientQuotasResponseEntryEntity + v.Default() + return v +} + +type DescribeClientQuotasResponseEntryValue struct { + // Key is the quota configuration key. + Key string + + // Value is the quota configuration value. + Value float64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeClientQuotasResponseEntryValue. +func (v *DescribeClientQuotasResponseEntryValue) Default() { +} + +// NewDescribeClientQuotasResponseEntryValue returns a default DescribeClientQuotasResponseEntryValue +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeClientQuotasResponseEntryValue() DescribeClientQuotasResponseEntryValue { + var v DescribeClientQuotasResponseEntryValue + v.Default() + return v +} + +type DescribeClientQuotasResponseEntry struct { + // Entity contains the quota entity components being described. + Entity []DescribeClientQuotasResponseEntryEntity + + // Values are quota values for the entity. + Values []DescribeClientQuotasResponseEntryValue + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeClientQuotasResponseEntry. +func (v *DescribeClientQuotasResponseEntry) Default() { +} + +// NewDescribeClientQuotasResponseEntry returns a default DescribeClientQuotasResponseEntry +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeClientQuotasResponseEntry() DescribeClientQuotasResponseEntry { + var v DescribeClientQuotasResponseEntry + v.Default() + return v +} + +// DescribeClientQuotasResponse is a response for a DescribeClientQuotasRequest. +type DescribeClientQuotasResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // ErrorCode is any error for the request. + ErrorCode int16 + + // ErrorMessage is an error message for the request, or null if the request succeeded. + ErrorMessage *string + + // Entries contains entities that were matched. + Entries []DescribeClientQuotasResponseEntry + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v1+ +} + +func (*DescribeClientQuotasResponse) Key() int16 { return 48 } +func (*DescribeClientQuotasResponse) MaxVersion() int16 { return 1 } +func (v *DescribeClientQuotasResponse) SetVersion(version int16) { v.Version = version } +func (v *DescribeClientQuotasResponse) GetVersion() int16 { return v.Version } +func (v *DescribeClientQuotasResponse) IsFlexible() bool { return v.Version >= 1 } +func (v *DescribeClientQuotasResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 0 +} + +func (v *DescribeClientQuotasResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *DescribeClientQuotasResponse) RequestKind() Request { + return &DescribeClientQuotasRequest{Version: v.Version} +} + +func (v *DescribeClientQuotasResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Entries + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + for i := range v { + v := &v[i] + { + v := v.Entity + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Type + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.Values + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Key + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Value + dst = kbin.AppendFloat64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeClientQuotasResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeClientQuotasResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeClientQuotasResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v 
+ } + { + v := s.Entries + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 0 || l == 0 { + a = []DescribeClientQuotasResponseEntry{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeClientQuotasResponseEntry, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := s.Entity + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeClientQuotasResponseEntryEntity, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Type = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Name = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Entity = v + } + { + v := s.Values + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeClientQuotasResponseEntryValue, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Key = v + } + { + v := b.Float64() + s.Value = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Values = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Entries = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeClientQuotasResponse returns a pointer to a default DescribeClientQuotasResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeClientQuotasResponse() *DescribeClientQuotasResponse { + var v DescribeClientQuotasResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeClientQuotasResponse. +func (v *DescribeClientQuotasResponse) Default() { +} + +// NewDescribeClientQuotasResponse returns a default DescribeClientQuotasResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeClientQuotasResponse() DescribeClientQuotasResponse { + var v DescribeClientQuotasResponse + v.Default() + return v +} + +type AlterClientQuotasRequestEntryEntity struct { + // Type is the entity component's type; e.g. "client-id", "user" or "ip". + Type string + + // Name is the name of the entity, or null for the default. + Name *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterClientQuotasRequestEntryEntity. 
+func (v *AlterClientQuotasRequestEntryEntity) Default() { +} + +// NewAlterClientQuotasRequestEntryEntity returns a default AlterClientQuotasRequestEntryEntity +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterClientQuotasRequestEntryEntity() AlterClientQuotasRequestEntryEntity { + var v AlterClientQuotasRequestEntryEntity + v.Default() + return v +} + +type AlterClientQuotasRequestEntryOp struct { + // Key is the quota configuration key to alter. + Key string + + // Value is the value to set; ignored if remove is true. + Value float64 + + // Remove is whether the quota configuration value should be removed or set. + Remove bool + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterClientQuotasRequestEntryOp. +func (v *AlterClientQuotasRequestEntryOp) Default() { +} + +// NewAlterClientQuotasRequestEntryOp returns a default AlterClientQuotasRequestEntryOp +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterClientQuotasRequestEntryOp() AlterClientQuotasRequestEntryOp { + var v AlterClientQuotasRequestEntryOp + v.Default() + return v +} + +type AlterClientQuotasRequestEntry struct { + // Entity contains the components of a quota entity to alter. + Entity []AlterClientQuotasRequestEntryEntity + + // Ops contains quota configuration entries to alter. + Ops []AlterClientQuotasRequestEntryOp + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterClientQuotasRequestEntry. +func (v *AlterClientQuotasRequestEntry) Default() { +} + +// NewAlterClientQuotasRequestEntry returns a default AlterClientQuotasRequestEntry +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterClientQuotasRequestEntry() AlterClientQuotasRequestEntry { + var v AlterClientQuotasRequestEntry + v.Default() + return v +} + +// AlterClientQuotaRequest, proposed in KIP-546 and introduced with Kafka 2.6.0, +// provides a way to alter client quotas. +type AlterClientQuotasRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Entries are quota configuration entries to alter. + Entries []AlterClientQuotasRequestEntry + + // ValidateOnly is makes this request a dry-run; the alteration is validated + // but not performed. + ValidateOnly bool + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +func (*AlterClientQuotasRequest) Key() int16 { return 49 } +func (*AlterClientQuotasRequest) MaxVersion() int16 { return 1 } +func (v *AlterClientQuotasRequest) SetVersion(version int16) { v.Version = version } +func (v *AlterClientQuotasRequest) GetVersion() int16 { return v.Version } +func (v *AlterClientQuotasRequest) IsFlexible() bool { return v.Version >= 1 } +func (v *AlterClientQuotasRequest) ResponseKind() Response { + r := &AlterClientQuotasResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
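A minimal sketch, again not part of the generated file, of filling in the alter-quota types just defined. The helper name capClientProduceRate is hypothetical; "client-id" is one of the entity types named in the comments above, and producer_byte_rate is assumed to be the standard Kafka quota configuration key for produce throughput.

package main

import (
	"fmt"

	"github.com/twmb/franz-go/pkg/kmsg"
)

// capClientProduceRate builds an AlterClientQuotasRequest that would cap the
// produce throughput of one client-id entity. ValidateOnly keeps it a dry run.
func capClientProduceRate(clientID string, bytesPerSec float64) []byte {
	entity := kmsg.NewAlterClientQuotasRequestEntryEntity()
	entity.Type = "client-id"
	entity.Name = &clientID

	op := kmsg.NewAlterClientQuotasRequestEntryOp()
	op.Key = "producer_byte_rate" // assumed standard Kafka quota key
	op.Value = bytesPerSec        // ignored if Remove were true

	entry := kmsg.NewAlterClientQuotasRequestEntry()
	entry.Entity = append(entry.Entity, entity)
	entry.Ops = append(entry.Ops, op)

	req := kmsg.NewPtrAlterClientQuotasRequest()
	req.SetVersion(0)
	req.Entries = append(req.Entries, entry)
	req.ValidateOnly = true // validate the alteration without applying it

	return req.AppendTo(nil)
}

func main() {
	fmt.Printf("AlterClientQuotas body: % x\n", capClientProduceRate("my-producer", 1<<20))
}
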
+func (v *AlterClientQuotasRequest) RequestWith(ctx context.Context, r Requestor) (*AlterClientQuotasResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*AlterClientQuotasResponse) + return resp, err +} + +func (v *AlterClientQuotasRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + { + v := v.Entries + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Entity + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Type + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.Ops + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Key + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Value + dst = kbin.AppendFloat64(dst, v) + } + { + v := v.Remove + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.ValidateOnly + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AlterClientQuotasRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AlterClientQuotasRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AlterClientQuotasRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + s := v + { + v := s.Entries + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterClientQuotasRequestEntry, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := s.Entity + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterClientQuotasRequestEntryEntity, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Type = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Name = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Entity = v + } + { + v := s.Ops + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterClientQuotasRequestEntryOp, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Key = v + } + { + v := b.Float64() + s.Value = v + } + { + v := b.Bool() + s.Remove = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Ops = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Entries = v + } + { + v := b.Bool() + s.ValidateOnly = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAlterClientQuotasRequest returns a pointer to a default AlterClientQuotasRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAlterClientQuotasRequest() *AlterClientQuotasRequest { + var v AlterClientQuotasRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterClientQuotasRequest. +func (v *AlterClientQuotasRequest) Default() { +} + +// NewAlterClientQuotasRequest returns a default AlterClientQuotasRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterClientQuotasRequest() AlterClientQuotasRequest { + var v AlterClientQuotasRequest + v.Default() + return v +} + +type AlterClientQuotasResponseEntryEntity struct { + // Type is the entity component's type; e.g. "client-id" or "user". + Type string + + // Name is the name of the entity, or null for the default. + Name *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterClientQuotasResponseEntryEntity. +func (v *AlterClientQuotasResponseEntryEntity) Default() { +} + +// NewAlterClientQuotasResponseEntryEntity returns a default AlterClientQuotasResponseEntryEntity +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterClientQuotasResponseEntryEntity() AlterClientQuotasResponseEntryEntity { + var v AlterClientQuotasResponseEntryEntity + v.Default() + return v +} + +type AlterClientQuotasResponseEntry struct { + // ErrorCode is the error code for an alter on a matched entity. + ErrorCode int16 + + // ErrorMessage is an informative message if the alter on this entity failed. + ErrorMessage *string + + // Entity contains the components of a matched entity. 
+ Entity []AlterClientQuotasResponseEntryEntity + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterClientQuotasResponseEntry. +func (v *AlterClientQuotasResponseEntry) Default() { +} + +// NewAlterClientQuotasResponseEntry returns a default AlterClientQuotasResponseEntry +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterClientQuotasResponseEntry() AlterClientQuotasResponseEntry { + var v AlterClientQuotasResponseEntry + v.Default() + return v +} + +// AlterClientQuotasResponse is a response to an AlterClientQuotasRequest. +type AlterClientQuotasResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // Entries contains results for the alter request. + Entries []AlterClientQuotasResponseEntry + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +func (*AlterClientQuotasResponse) Key() int16 { return 49 } +func (*AlterClientQuotasResponse) MaxVersion() int16 { return 1 } +func (v *AlterClientQuotasResponse) SetVersion(version int16) { v.Version = version } +func (v *AlterClientQuotasResponse) GetVersion() int16 { return v.Version } +func (v *AlterClientQuotasResponse) IsFlexible() bool { return v.Version >= 1 } +func (v *AlterClientQuotasResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 0 } +func (v *AlterClientQuotasResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *AlterClientQuotasResponse) RequestKind() Request { + return &AlterClientQuotasRequest{Version: v.Version} +} + +func (v *AlterClientQuotasResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Entries + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Entity + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Type + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AlterClientQuotasResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AlterClientQuotasResponse) UnsafeReadFrom(src []byte) 
error { + return v.readFrom(src, true) +} + +func (v *AlterClientQuotasResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Entries + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterClientQuotasResponseEntry, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + v := s.Entity + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterClientQuotasResponseEntryEntity, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Type = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Name = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Entity = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Entries = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAlterClientQuotasResponse returns a pointer to a default AlterClientQuotasResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAlterClientQuotasResponse() *AlterClientQuotasResponse { + var v AlterClientQuotasResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterClientQuotasResponse. +func (v *AlterClientQuotasResponse) Default() { +} + +// NewAlterClientQuotasResponse returns a default AlterClientQuotasResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterClientQuotasResponse() AlterClientQuotasResponse { + var v AlterClientQuotasResponse + v.Default() + return v +} + +type DescribeUserSCRAMCredentialsRequestUser struct { + // The user name. + Name string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeUserSCRAMCredentialsRequestUser. +func (v *DescribeUserSCRAMCredentialsRequestUser) Default() { +} + +// NewDescribeUserSCRAMCredentialsRequestUser returns a default DescribeUserSCRAMCredentialsRequestUser +// This is a shortcut for creating a struct and calling Default yourself. 
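Before the SCRAM messages, a short round-trip sketch for the AlterClientQuotasResponse defined above (editor's illustration, not part of the vendored file). It fabricates a response body with AppendTo purely so there are bytes to decode; the entity values are placeholders.

package main

import (
	"fmt"

	"github.com/twmb/franz-go/pkg/kmsg"
)

func main() {
	// Fabricate a response body with AppendTo, purely to have bytes to decode.
	name := "my-producer"
	ent := kmsg.NewAlterClientQuotasResponseEntryEntity()
	ent.Type = "client-id"
	ent.Name = &name

	entry := kmsg.NewAlterClientQuotasResponseEntry()
	entry.Entity = append(entry.Entity, ent)

	src := kmsg.NewPtrAlterClientQuotasResponse()
	src.SetVersion(0)
	src.Entries = append(src.Entries, entry)
	body := src.AppendTo(nil)

	// Decode the same bytes; ReadFrom needs the version set first, because the
	// wire format differs between the non-flexible v0 and the flexible v1.
	dst := kmsg.NewPtrAlterClientQuotasResponse()
	dst.SetVersion(0)
	if err := dst.ReadFrom(body); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	for _, e := range dst.Entries {
		fmt.Printf("%s/%s -> error code %d\n", e.Entity[0].Type, *e.Entity[0].Name, e.ErrorCode)
	}
}
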
+func NewDescribeUserSCRAMCredentialsRequestUser() DescribeUserSCRAMCredentialsRequestUser { + var v DescribeUserSCRAMCredentialsRequestUser + v.Default() + return v +} + +// DescribeUserSCRAMCredentialsRequest, proposed in KIP-554 and introduced +// with Kafka 2.7.0, describes user SCRAM credentials. +// +// This request was introduced as part of the overarching KIP-500 initiative, +// which is to remove Zookeeper as a dependency. +// +// This request requires DESCRIBE on CLUSTER. +type DescribeUserSCRAMCredentialsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The users to describe, or null to describe all. + Users []DescribeUserSCRAMCredentialsRequestUser + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*DescribeUserSCRAMCredentialsRequest) Key() int16 { return 50 } +func (*DescribeUserSCRAMCredentialsRequest) MaxVersion() int16 { return 0 } +func (v *DescribeUserSCRAMCredentialsRequest) SetVersion(version int16) { v.Version = version } +func (v *DescribeUserSCRAMCredentialsRequest) GetVersion() int16 { return v.Version } +func (v *DescribeUserSCRAMCredentialsRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *DescribeUserSCRAMCredentialsRequest) ResponseKind() Response { + r := &DescribeUserSCRAMCredentialsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *DescribeUserSCRAMCredentialsRequest) RequestWith(ctx context.Context, r Requestor) (*DescribeUserSCRAMCredentialsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DescribeUserSCRAMCredentialsResponse) + return resp, err +} + +func (v *DescribeUserSCRAMCredentialsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.Users + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + for i := range v { + v := &v[i] + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeUserSCRAMCredentialsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeUserSCRAMCredentialsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeUserSCRAMCredentialsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := s.Users + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 0 || l == 0 { + a = []DescribeUserSCRAMCredentialsRequestUser{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeUserSCRAMCredentialsRequestUser, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Users = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeUserSCRAMCredentialsRequest returns a pointer to a default DescribeUserSCRAMCredentialsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeUserSCRAMCredentialsRequest() *DescribeUserSCRAMCredentialsRequest { + var v DescribeUserSCRAMCredentialsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeUserSCRAMCredentialsRequest. +func (v *DescribeUserSCRAMCredentialsRequest) Default() { +} + +// NewDescribeUserSCRAMCredentialsRequest returns a default DescribeUserSCRAMCredentialsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeUserSCRAMCredentialsRequest() DescribeUserSCRAMCredentialsRequest { + var v DescribeUserSCRAMCredentialsRequest + v.Default() + return v +} + +type DescribeUserSCRAMCredentialsResponseResultCredentialInfo struct { + // The SCRAM mechanism for this user, where 0 is UNKNOWN, 1 is SCRAM-SHA-256, + // and 2 is SCRAM-SHA-512. + Mechanism int8 + + // The number of iterations used in the SCRAM credential. + Iterations int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeUserSCRAMCredentialsResponseResultCredentialInfo. +func (v *DescribeUserSCRAMCredentialsResponseResultCredentialInfo) Default() { +} + +// NewDescribeUserSCRAMCredentialsResponseResultCredentialInfo returns a default DescribeUserSCRAMCredentialsResponseResultCredentialInfo +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeUserSCRAMCredentialsResponseResultCredentialInfo() DescribeUserSCRAMCredentialsResponseResultCredentialInfo { + var v DescribeUserSCRAMCredentialsResponseResultCredentialInfo + v.Default() + return v +} + +type DescribeUserSCRAMCredentialsResponseResult struct { + // The name this result corresponds to. + User string + + // The user-level error code. + // + // RESOURCE_NOT_FOUND if the user does not exist or has no credentials. + // + // DUPLICATE_RESOURCE if the user is requested twice+. + ErrorCode int16 + + // The user-level error message, if any. + ErrorMessage *string + + // Information about the SCRAM credentials for this user. + CredentialInfos []DescribeUserSCRAMCredentialsResponseResultCredentialInfo + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeUserSCRAMCredentialsResponseResult. +func (v *DescribeUserSCRAMCredentialsResponseResult) Default() { +} + +// NewDescribeUserSCRAMCredentialsResponseResult returns a default DescribeUserSCRAMCredentialsResponseResult +// This is a shortcut for creating a struct and calling Default yourself. 
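A small, self-contained sketch (not part of the generated file) of building a DescribeUserSCRAMCredentialsRequest for a single user; the user name is a placeholder, and the comments about the response merely restate what the result types above document.

package main

import (
	"fmt"

	"github.com/twmb/franz-go/pkg/kmsg"
)

func main() {
	// Ask for one specific user; leaving Users nil instead asks for all users.
	u := kmsg.NewDescribeUserSCRAMCredentialsRequestUser()
	u.Name = "alice"

	req := kmsg.NewPtrDescribeUserSCRAMCredentialsRequest()
	req.SetVersion(0) // this API is flexible (compact encoding) from v0 on
	req.Users = append(req.Users, u)

	// The matching response carries Results, one per user, whose CredentialInfos
	// report Mechanism (1 = SCRAM-SHA-256, 2 = SCRAM-SHA-512) and Iterations.
	fmt.Printf("DescribeUserSCRAMCredentials body: % x\n", req.AppendTo(nil))
}
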
+func NewDescribeUserSCRAMCredentialsResponseResult() DescribeUserSCRAMCredentialsResponseResult { + var v DescribeUserSCRAMCredentialsResponseResult + v.Default() + return v +} + +// DescribeUserSCRAMCredentialsResponse is a response for a +// DescribeUserSCRAMCredentialsRequest. +type DescribeUserSCRAMCredentialsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // The request-level error code. This is 0 except for auth or infra issues. + // + // CLUSTER_AUTHORIZATION_FAILED if you do not have DESCRIBE on CLUSTER. + ErrorCode int16 + + // The request-level error message, if any. + ErrorMessage *string + + // Results for descriptions, one per user. + Results []DescribeUserSCRAMCredentialsResponseResult + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*DescribeUserSCRAMCredentialsResponse) Key() int16 { return 50 } +func (*DescribeUserSCRAMCredentialsResponse) MaxVersion() int16 { return 0 } +func (v *DescribeUserSCRAMCredentialsResponse) SetVersion(version int16) { v.Version = version } +func (v *DescribeUserSCRAMCredentialsResponse) GetVersion() int16 { return v.Version } +func (v *DescribeUserSCRAMCredentialsResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *DescribeUserSCRAMCredentialsResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 0 +} + +func (v *DescribeUserSCRAMCredentialsResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *DescribeUserSCRAMCredentialsResponse) RequestKind() Request { + return &DescribeUserSCRAMCredentialsRequest{Version: v.Version} +} + +func (v *DescribeUserSCRAMCredentialsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Results + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.User + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.CredentialInfos + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Mechanism + dst = kbin.AppendInt8(dst, v) + } + { + v := v.Iterations + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeUserSCRAMCredentialsResponse) 
ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeUserSCRAMCredentialsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeUserSCRAMCredentialsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + v := s.Results + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeUserSCRAMCredentialsResponseResult, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.User = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + v := s.CredentialInfos + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeUserSCRAMCredentialsResponseResultCredentialInfo, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int8() + s.Mechanism = v + } + { + v := b.Int32() + s.Iterations = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.CredentialInfos = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Results = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeUserSCRAMCredentialsResponse returns a pointer to a default DescribeUserSCRAMCredentialsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeUserSCRAMCredentialsResponse() *DescribeUserSCRAMCredentialsResponse { + var v DescribeUserSCRAMCredentialsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeUserSCRAMCredentialsResponse. +func (v *DescribeUserSCRAMCredentialsResponse) Default() { +} + +// NewDescribeUserSCRAMCredentialsResponse returns a default DescribeUserSCRAMCredentialsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeUserSCRAMCredentialsResponse() DescribeUserSCRAMCredentialsResponse { + var v DescribeUserSCRAMCredentialsResponse + v.Default() + return v +} + +type AlterUserSCRAMCredentialsRequestDeletion struct { + // The user name to match for removal. + Name string + + // The mechanism for the user name to remove. + Mechanism int8 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterUserSCRAMCredentialsRequestDeletion. +func (v *AlterUserSCRAMCredentialsRequestDeletion) Default() { +} + +// NewAlterUserSCRAMCredentialsRequestDeletion returns a default AlterUserSCRAMCredentialsRequestDeletion +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterUserSCRAMCredentialsRequestDeletion() AlterUserSCRAMCredentialsRequestDeletion { + var v AlterUserSCRAMCredentialsRequestDeletion + v.Default() + return v +} + +type AlterUserSCRAMCredentialsRequestUpsertion struct { + // The user name to use. + Name string + + // The mechanism to use for creating, where 1 is SCRAM-SHA-256 and 2 is + // SCRAM-SHA-512. + Mechanism int8 + + // The number of iterations to use. This must be more than the minimum for + // the mechanism and cannot be more than 16384. + Iterations int32 + + // A random salt generated by the client. + Salt []byte + + // The salted password to use. + SaltedPassword []byte + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterUserSCRAMCredentialsRequestUpsertion. +func (v *AlterUserSCRAMCredentialsRequestUpsertion) Default() { +} + +// NewAlterUserSCRAMCredentialsRequestUpsertion returns a default AlterUserSCRAMCredentialsRequestUpsertion +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterUserSCRAMCredentialsRequestUpsertion() AlterUserSCRAMCredentialsRequestUpsertion { + var v AlterUserSCRAMCredentialsRequestUpsertion + v.Default() + return v +} + +// AlterUserSCRAMCredentialsRequest, proposed in KIP-554 and introduced +// with Kafka 2.7.0, alters or deletes user SCRAM credentials. +// +// This request was introduced as part of the overarching KIP-500 initiative, +// which is to remove Zookeeper as a dependency. +// +// This request requires ALTER on CLUSTER. +type AlterUserSCRAMCredentialsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The SCRAM credentials to remove. + Deletions []AlterUserSCRAMCredentialsRequestDeletion + + // The SCRAM credentials to update or insert. + Upsertions []AlterUserSCRAMCredentialsRequestUpsertion + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*AlterUserSCRAMCredentialsRequest) Key() int16 { return 51 } +func (*AlterUserSCRAMCredentialsRequest) MaxVersion() int16 { return 0 } +func (v *AlterUserSCRAMCredentialsRequest) SetVersion(version int16) { v.Version = version } +func (v *AlterUserSCRAMCredentialsRequest) GetVersion() int16 { return v.Version } +func (v *AlterUserSCRAMCredentialsRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *AlterUserSCRAMCredentialsRequest) IsAdminRequest() {} +func (v *AlterUserSCRAMCredentialsRequest) ResponseKind() Response { + r := &AlterUserSCRAMCredentialsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
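A last hedged sketch (editor's illustration only) showing a deletion-only AlterUserSCRAMCredentialsRequest built from the types above. A real upsertion would additionally need a client-generated Salt and SaltedPassword, which are omitted here; the user name is a placeholder.

package main

import (
	"fmt"

	"github.com/twmb/franz-go/pkg/kmsg"
)

func main() {
	// Remove the SCRAM-SHA-512 credential for one user.
	del := kmsg.NewAlterUserSCRAMCredentialsRequestDeletion()
	del.Name = "alice"
	del.Mechanism = 2 // 2 = SCRAM-SHA-512, 1 = SCRAM-SHA-256

	req := kmsg.NewPtrAlterUserSCRAMCredentialsRequest()
	req.SetVersion(0)
	req.Deletions = append(req.Deletions, del)

	fmt.Printf("AlterUserSCRAMCredentials body: % x\n", req.AppendTo(nil))
}
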
+func (v *AlterUserSCRAMCredentialsRequest) RequestWith(ctx context.Context, r Requestor) (*AlterUserSCRAMCredentialsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*AlterUserSCRAMCredentialsResponse) + return resp, err +} + +func (v *AlterUserSCRAMCredentialsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.Deletions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Mechanism + dst = kbin.AppendInt8(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.Upsertions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Mechanism + dst = kbin.AppendInt8(dst, v) + } + { + v := v.Iterations + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Salt + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + { + v := v.SaltedPassword + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AlterUserSCRAMCredentialsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AlterUserSCRAMCredentialsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AlterUserSCRAMCredentialsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := s.Deletions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterUserSCRAMCredentialsRequestDeletion, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + { + v := b.Int8() + s.Mechanism = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Deletions = v + } + { + v := s.Upsertions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterUserSCRAMCredentialsRequestUpsertion, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + { + v := b.Int8() + s.Mechanism = v + } + { + v := b.Int32() + s.Iterations = v + } + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.Salt = v + } + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.SaltedPassword = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Upsertions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAlterUserSCRAMCredentialsRequest returns a pointer to a default AlterUserSCRAMCredentialsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAlterUserSCRAMCredentialsRequest() *AlterUserSCRAMCredentialsRequest { + var v AlterUserSCRAMCredentialsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterUserSCRAMCredentialsRequest. +func (v *AlterUserSCRAMCredentialsRequest) Default() { +} + +// NewAlterUserSCRAMCredentialsRequest returns a default AlterUserSCRAMCredentialsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterUserSCRAMCredentialsRequest() AlterUserSCRAMCredentialsRequest { + var v AlterUserSCRAMCredentialsRequest + v.Default() + return v +} + +type AlterUserSCRAMCredentialsResponseResult struct { + // The name this result corresponds to. + User string + + // The user-level error code. + ErrorCode int16 + + // The user-level error message, if any. + ErrorMessage *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterUserSCRAMCredentialsResponseResult. +func (v *AlterUserSCRAMCredentialsResponseResult) Default() { +} + +// NewAlterUserSCRAMCredentialsResponseResult returns a default AlterUserSCRAMCredentialsResponseResult +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterUserSCRAMCredentialsResponseResult() AlterUserSCRAMCredentialsResponseResult { + var v AlterUserSCRAMCredentialsResponseResult + v.Default() + return v +} + +// AlterUserSCRAMCredentialsResponse is a response for an +// AlterUserSCRAMCredentialsRequest. +type AlterUserSCRAMCredentialsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // The results for deletions and upsertions. + Results []AlterUserSCRAMCredentialsResponseResult + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags +} + +func (*AlterUserSCRAMCredentialsResponse) Key() int16 { return 51 } +func (*AlterUserSCRAMCredentialsResponse) MaxVersion() int16 { return 0 } +func (v *AlterUserSCRAMCredentialsResponse) SetVersion(version int16) { v.Version = version } +func (v *AlterUserSCRAMCredentialsResponse) GetVersion() int16 { return v.Version } +func (v *AlterUserSCRAMCredentialsResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *AlterUserSCRAMCredentialsResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 0 +} + +func (v *AlterUserSCRAMCredentialsResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *AlterUserSCRAMCredentialsResponse) RequestKind() Request { + return &AlterUserSCRAMCredentialsRequest{Version: v.Version} +} + +func (v *AlterUserSCRAMCredentialsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Results + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.User + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AlterUserSCRAMCredentialsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AlterUserSCRAMCredentialsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AlterUserSCRAMCredentialsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Results + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterUserSCRAMCredentialsResponseResult, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.User = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Results = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAlterUserSCRAMCredentialsResponse returns a pointer to a default AlterUserSCRAMCredentialsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAlterUserSCRAMCredentialsResponse() *AlterUserSCRAMCredentialsResponse { + var v AlterUserSCRAMCredentialsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterUserSCRAMCredentialsResponse. +func (v *AlterUserSCRAMCredentialsResponse) Default() { +} + +// NewAlterUserSCRAMCredentialsResponse returns a default AlterUserSCRAMCredentialsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterUserSCRAMCredentialsResponse() AlterUserSCRAMCredentialsResponse { + var v AlterUserSCRAMCredentialsResponse + v.Default() + return v +} + +type VoteRequestTopicPartition struct { + Partition int32 + + // The bumped epoch of the candidate sending the request. + CandidateEpoch int32 + + // The ID of the voter sending the request. + CandidateID int32 + + // The epoch of the last record written to the metadata log. + LastOffsetEpoch int32 + + // The offset of the last record written to the metadata log. + LastOffset int64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to VoteRequestTopicPartition. +func (v *VoteRequestTopicPartition) Default() { +} + +// NewVoteRequestTopicPartition returns a default VoteRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewVoteRequestTopicPartition() VoteRequestTopicPartition { + var v VoteRequestTopicPartition + v.Default() + return v +} + +type VoteRequestTopic struct { + Topic string + + Partitions []VoteRequestTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to VoteRequestTopic. +func (v *VoteRequestTopic) Default() { +} + +// NewVoteRequestTopic returns a default VoteRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewVoteRequestTopic() VoteRequestTopic { + var v VoteRequestTopic + v.Default() + return v +} + +// Part of KIP-595 to replace Kafka's dependence on Zookeeper with a +// Kafka-only raft protocol, +// VoteRequest is used by voters to hold a leader election. +// +// Since this is relatively Kafka internal, most fields are left undocumented. 
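The generated types above all share one lifecycle: construct with New*/NewPtr*, set the target version, then encode with AppendTo or decode with ReadFrom. A minimal sketch for the AlterUserSCRAMCredentials types follows; the user name, the mechanism value (2 is assumed to mean SCRAM-SHA-512), and the SCRAM parameters are illustrative and not part of this patch.

package main

import (
	"fmt"

	"github.com/twmb/franz-go/pkg/kmsg"
)

func main() {
	// Build one upsertion with illustrative credentials.
	up := kmsg.NewAlterUserSCRAMCredentialsRequestUpsertion()
	up.Name = "alice"
	up.Mechanism = 2 // assumed: 2 = SCRAM-SHA-512
	up.Iterations = 8192
	up.Salt = []byte("example-salt")
	up.SaltedPassword = []byte("example-salted-password")

	req := kmsg.NewPtrAlterUserSCRAMCredentialsRequest()
	req.SetVersion(0) // the version must be set before encoding or decoding
	req.Upsertions = append(req.Upsertions, up)

	// AppendTo writes only the message body; the Kafka request header
	// (correlation ID, client ID) is added by whatever sends the request.
	wire := req.AppendTo(nil)

	decoded := kmsg.NewPtrAlterUserSCRAMCredentialsRequest()
	decoded.SetVersion(0)
	if err := decoded.ReadFrom(wire); err != nil {
		panic(err)
	}
	fmt.Println(decoded.Upsertions[0].Name) // alice
}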
+type VoteRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + ClusterID *string + + Topics []VoteRequestTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*VoteRequest) Key() int16 { return 52 } +func (*VoteRequest) MaxVersion() int16 { return 0 } +func (v *VoteRequest) SetVersion(version int16) { v.Version = version } +func (v *VoteRequest) GetVersion() int16 { return v.Version } +func (v *VoteRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *VoteRequest) IsAdminRequest() {} +func (v *VoteRequest) ResponseKind() Response { + r := &VoteResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *VoteRequest) RequestWith(ctx context.Context, r Requestor) (*VoteResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*VoteResponse) + return resp, err +} + +func (v *VoteRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ClusterID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.CandidateEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.CandidateID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LastOffsetEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LastOffset + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *VoteRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *VoteRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *VoteRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ClusterID = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]VoteRequestTopic, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]VoteRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int32() + s.CandidateEpoch = v + } + { + v := b.Int32() + s.CandidateID = v + } + { + v := b.Int32() + s.LastOffsetEpoch = v + } + { + v := b.Int64() + s.LastOffset = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrVoteRequest returns a pointer to a default VoteRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrVoteRequest() *VoteRequest { + var v VoteRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to VoteRequest. +func (v *VoteRequest) Default() { +} + +// NewVoteRequest returns a default VoteRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewVoteRequest() VoteRequest { + var v VoteRequest + v.Default() + return v +} + +type VoteResponseTopicPartition struct { + Partition int32 + + ErrorCode int16 + + // The ID of the current leader, or -1 if the leader is unknown. + LeaderID int32 + + // The latest known leader epoch. + LeaderEpoch int32 + + // Whether the vote was granted. + VoteGranted bool + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to VoteResponseTopicPartition. +func (v *VoteResponseTopicPartition) Default() { +} + +// NewVoteResponseTopicPartition returns a default VoteResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewVoteResponseTopicPartition() VoteResponseTopicPartition { + var v VoteResponseTopicPartition + v.Default() + return v +} + +type VoteResponseTopic struct { + Topic string + + Partitions []VoteResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to VoteResponseTopic. +func (v *VoteResponseTopic) Default() { +} + +// NewVoteResponseTopic returns a default VoteResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewVoteResponseTopic() VoteResponseTopic { + var v VoteResponseTopic + v.Default() + return v +} + +type VoteResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + ErrorCode int16 + + Topics []VoteResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags +} + +func (*VoteResponse) Key() int16 { return 52 } +func (*VoteResponse) MaxVersion() int16 { return 0 } +func (v *VoteResponse) SetVersion(version int16) { v.Version = version } +func (v *VoteResponse) GetVersion() int16 { return v.Version } +func (v *VoteResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *VoteResponse) RequestKind() Request { return &VoteRequest{Version: v.Version} } + +func (v *VoteResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.LeaderID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.VoteGranted + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *VoteResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *VoteResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *VoteResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]VoteResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]VoteResponseTopicPartition, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int32() + s.LeaderID = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + { + v := b.Bool() + s.VoteGranted = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrVoteResponse returns a pointer to a default VoteResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrVoteResponse() *VoteResponse { + var v VoteResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to VoteResponse. +func (v *VoteResponse) Default() { +} + +// NewVoteResponse returns a default VoteResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewVoteResponse() VoteResponse { + var v VoteResponse + v.Default() + return v +} + +type BeginQuorumEpochRequestTopicPartition struct { + Partition int32 + + // The ID of the newly elected leader. + LeaderID int32 + + // The epoch of the newly elected leader. + LeaderEpoch int32 +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to BeginQuorumEpochRequestTopicPartition. +func (v *BeginQuorumEpochRequestTopicPartition) Default() { +} + +// NewBeginQuorumEpochRequestTopicPartition returns a default BeginQuorumEpochRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewBeginQuorumEpochRequestTopicPartition() BeginQuorumEpochRequestTopicPartition { + var v BeginQuorumEpochRequestTopicPartition + v.Default() + return v +} + +type BeginQuorumEpochRequestTopic struct { + Topic string + + Partitions []BeginQuorumEpochRequestTopicPartition +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to BeginQuorumEpochRequestTopic. +func (v *BeginQuorumEpochRequestTopic) Default() { +} + +// NewBeginQuorumEpochRequestTopic returns a default BeginQuorumEpochRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewBeginQuorumEpochRequestTopic() BeginQuorumEpochRequestTopic { + var v BeginQuorumEpochRequestTopic + v.Default() + return v +} + +// Part of KIP-595 to replace Kafka's dependence on Zookeeper with a +// Kafka-only raft protocol, +// BeginQuorumEpochRequest is sent by a leader (once it has enough votes) +// to all voters in the election. +// +// Since this is relatively Kafka internal, most fields are left undocumented. +type BeginQuorumEpochRequest struct { + // Version is the version of this message used with a Kafka broker. 
+ Version int16 + + ClusterID *string + + Topics []BeginQuorumEpochRequestTopic +} + +func (*BeginQuorumEpochRequest) Key() int16 { return 53 } +func (*BeginQuorumEpochRequest) MaxVersion() int16 { return 0 } +func (v *BeginQuorumEpochRequest) SetVersion(version int16) { v.Version = version } +func (v *BeginQuorumEpochRequest) GetVersion() int16 { return v.Version } +func (v *BeginQuorumEpochRequest) IsFlexible() bool { return false } +func (v *BeginQuorumEpochRequest) IsAdminRequest() {} +func (v *BeginQuorumEpochRequest) ResponseKind() Response { + r := &BeginQuorumEpochResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *BeginQuorumEpochRequest) RequestWith(ctx context.Context, r Requestor) (*BeginQuorumEpochResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*BeginQuorumEpochResponse) + return resp, err +} + +func (v *BeginQuorumEpochRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.ClusterID + dst = kbin.AppendNullableString(dst, v) + } + { + v := v.Topics + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Topic + dst = kbin.AppendString(dst, v) + } + { + v := v.Partitions + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + } + } + } + } + return dst +} + +func (v *BeginQuorumEpochRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *BeginQuorumEpochRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *BeginQuorumEpochRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + s := v + { + var v *string + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + s.ClusterID = v + } + { + v := s.Topics + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]BeginQuorumEpochRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]BeginQuorumEpochRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int32() + s.LeaderID = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + } + v = a + s.Partitions = v + } + } + v = a + s.Topics = v + } + return b.Complete() +} + +// NewPtrBeginQuorumEpochRequest returns a pointer to a default BeginQuorumEpochRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrBeginQuorumEpochRequest() *BeginQuorumEpochRequest { + var v BeginQuorumEpochRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to BeginQuorumEpochRequest. 
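Vote is a Kafka-internal raft message, so client code is unlikely to send it, but decoding a VoteResponse body is a compact illustration of how the nested Topics/Partitions structures above are walked. The raw respBytes and the chosen version are assumptions in this sketch.

package raftsketch

import (
	"fmt"

	"github.com/twmb/franz-go/pkg/kmsg"
)

// handleVoteResponse decodes a raw VoteResponse body (without the response
// header) and reports which partitions granted the vote.
func handleVoteResponse(respBytes []byte) error {
	resp := kmsg.NewPtrVoteResponse()
	resp.SetVersion(0) // must match the version the request was sent with
	if err := resp.ReadFrom(respBytes); err != nil {
		return err
	}
	for _, t := range resp.Topics {
		for _, p := range t.Partitions {
			if p.VoteGranted {
				fmt.Printf("%s[%d]: vote granted (known leader %d, epoch %d)\n",
					t.Topic, p.Partition, p.LeaderID, p.LeaderEpoch)
			}
		}
	}
	return nil
}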
+func (v *BeginQuorumEpochRequest) Default() { +} + +// NewBeginQuorumEpochRequest returns a default BeginQuorumEpochRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewBeginQuorumEpochRequest() BeginQuorumEpochRequest { + var v BeginQuorumEpochRequest + v.Default() + return v +} + +type BeginQuorumEpochResponseTopicPartition struct { + Partition int32 + + ErrorCode int16 + + // The ID of the current leader, or -1 if the leader is unknown. + LeaderID int32 + + // The latest known leader epoch. + LeaderEpoch int32 +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to BeginQuorumEpochResponseTopicPartition. +func (v *BeginQuorumEpochResponseTopicPartition) Default() { +} + +// NewBeginQuorumEpochResponseTopicPartition returns a default BeginQuorumEpochResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewBeginQuorumEpochResponseTopicPartition() BeginQuorumEpochResponseTopicPartition { + var v BeginQuorumEpochResponseTopicPartition + v.Default() + return v +} + +type BeginQuorumEpochResponseTopic struct { + Topic string + + Partitions []BeginQuorumEpochResponseTopicPartition +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to BeginQuorumEpochResponseTopic. +func (v *BeginQuorumEpochResponseTopic) Default() { +} + +// NewBeginQuorumEpochResponseTopic returns a default BeginQuorumEpochResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewBeginQuorumEpochResponseTopic() BeginQuorumEpochResponseTopic { + var v BeginQuorumEpochResponseTopic + v.Default() + return v +} + +type BeginQuorumEpochResponse struct { + // Version is the version of this message used with a Kafka broker. 
+ Version int16 + + ErrorCode int16 + + Topics []BeginQuorumEpochResponseTopic +} + +func (*BeginQuorumEpochResponse) Key() int16 { return 53 } +func (*BeginQuorumEpochResponse) MaxVersion() int16 { return 0 } +func (v *BeginQuorumEpochResponse) SetVersion(version int16) { v.Version = version } +func (v *BeginQuorumEpochResponse) GetVersion() int16 { return v.Version } +func (v *BeginQuorumEpochResponse) IsFlexible() bool { return false } +func (v *BeginQuorumEpochResponse) RequestKind() Request { + return &BeginQuorumEpochRequest{Version: v.Version} +} + +func (v *BeginQuorumEpochResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Topics + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Topic + dst = kbin.AppendString(dst, v) + } + { + v := v.Partitions + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.LeaderID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + } + } + } + } + return dst +} + +func (v *BeginQuorumEpochResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *BeginQuorumEpochResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *BeginQuorumEpochResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + v := s.Topics + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]BeginQuorumEpochResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]BeginQuorumEpochResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int32() + s.LeaderID = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + } + v = a + s.Partitions = v + } + } + v = a + s.Topics = v + } + return b.Complete() +} + +// NewPtrBeginQuorumEpochResponse returns a pointer to a default BeginQuorumEpochResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrBeginQuorumEpochResponse() *BeginQuorumEpochResponse { + var v BeginQuorumEpochResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to BeginQuorumEpochResponse. +func (v *BeginQuorumEpochResponse) Default() { +} + +// NewBeginQuorumEpochResponse returns a default BeginQuorumEpochResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewBeginQuorumEpochResponse() BeginQuorumEpochResponse { + var v BeginQuorumEpochResponse + v.Default() + return v +} + +type EndQuorumEpochRequestTopicPartition struct { + Partition int32 + + // The current leader ID that is resigning. + LeaderID int32 + + // The current epoch. 
+ LeaderEpoch int32 + + // A sorted list of preferred successors to start the election. + PreferredSuccessors []int32 +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to EndQuorumEpochRequestTopicPartition. +func (v *EndQuorumEpochRequestTopicPartition) Default() { +} + +// NewEndQuorumEpochRequestTopicPartition returns a default EndQuorumEpochRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewEndQuorumEpochRequestTopicPartition() EndQuorumEpochRequestTopicPartition { + var v EndQuorumEpochRequestTopicPartition + v.Default() + return v +} + +type EndQuorumEpochRequestTopic struct { + Topic string + + Partitions []EndQuorumEpochRequestTopicPartition +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to EndQuorumEpochRequestTopic. +func (v *EndQuorumEpochRequestTopic) Default() { +} + +// NewEndQuorumEpochRequestTopic returns a default EndQuorumEpochRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewEndQuorumEpochRequestTopic() EndQuorumEpochRequestTopic { + var v EndQuorumEpochRequestTopic + v.Default() + return v +} + +// Part of KIP-595 to replace Kafka's dependence on Zookeeper with a +// Kafka-only raft protocol, +// EndQuorumEpochRequest is sent by a leader to gracefully step down as leader +// (i.e. on shutdown). Stepping down begins a new election. +// +// Since this is relatively Kafka internal, most fields are left undocumented. +type EndQuorumEpochRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + ClusterID *string + + Topics []EndQuorumEpochRequestTopic +} + +func (*EndQuorumEpochRequest) Key() int16 { return 54 } +func (*EndQuorumEpochRequest) MaxVersion() int16 { return 0 } +func (v *EndQuorumEpochRequest) SetVersion(version int16) { v.Version = version } +func (v *EndQuorumEpochRequest) GetVersion() int16 { return v.Version } +func (v *EndQuorumEpochRequest) IsFlexible() bool { return false } +func (v *EndQuorumEpochRequest) IsAdminRequest() {} +func (v *EndQuorumEpochRequest) ResponseKind() Response { + r := &EndQuorumEpochResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
+func (v *EndQuorumEpochRequest) RequestWith(ctx context.Context, r Requestor) (*EndQuorumEpochResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*EndQuorumEpochResponse) + return resp, err +} + +func (v *EndQuorumEpochRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.ClusterID + dst = kbin.AppendNullableString(dst, v) + } + { + v := v.Topics + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Topic + dst = kbin.AppendString(dst, v) + } + { + v := v.Partitions + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.PreferredSuccessors + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + } + } + } + } + return dst +} + +func (v *EndQuorumEpochRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *EndQuorumEpochRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *EndQuorumEpochRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + s := v + { + var v *string + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + s.ClusterID = v + } + { + v := s.Topics + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]EndQuorumEpochRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]EndQuorumEpochRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int32() + s.LeaderID = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + { + v := s.PreferredSuccessors + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.PreferredSuccessors = v + } + } + v = a + s.Partitions = v + } + } + v = a + s.Topics = v + } + return b.Complete() +} + +// NewPtrEndQuorumEpochRequest returns a pointer to a default EndQuorumEpochRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrEndQuorumEpochRequest() *EndQuorumEpochRequest { + var v EndQuorumEpochRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to EndQuorumEpochRequest. +func (v *EndQuorumEpochRequest) Default() { +} + +// NewEndQuorumEpochRequest returns a default EndQuorumEpochRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewEndQuorumEpochRequest() EndQuorumEpochRequest { + var v EndQuorumEpochRequest + v.Default() + return v +} + +type EndQuorumEpochResponseTopicPartition struct { + Partition int32 + + ErrorCode int16 + + // The ID of the current leader, or -1 if the leader is unknown. 
+ LeaderID int32 + + // The latest known leader epoch. + LeaderEpoch int32 +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to EndQuorumEpochResponseTopicPartition. +func (v *EndQuorumEpochResponseTopicPartition) Default() { +} + +// NewEndQuorumEpochResponseTopicPartition returns a default EndQuorumEpochResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewEndQuorumEpochResponseTopicPartition() EndQuorumEpochResponseTopicPartition { + var v EndQuorumEpochResponseTopicPartition + v.Default() + return v +} + +type EndQuorumEpochResponseTopic struct { + Topic string + + Partitions []EndQuorumEpochResponseTopicPartition +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to EndQuorumEpochResponseTopic. +func (v *EndQuorumEpochResponseTopic) Default() { +} + +// NewEndQuorumEpochResponseTopic returns a default EndQuorumEpochResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewEndQuorumEpochResponseTopic() EndQuorumEpochResponseTopic { + var v EndQuorumEpochResponseTopic + v.Default() + return v +} + +type EndQuorumEpochResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + ErrorCode int16 + + Topics []EndQuorumEpochResponseTopic +} + +func (*EndQuorumEpochResponse) Key() int16 { return 54 } +func (*EndQuorumEpochResponse) MaxVersion() int16 { return 0 } +func (v *EndQuorumEpochResponse) SetVersion(version int16) { v.Version = version } +func (v *EndQuorumEpochResponse) GetVersion() int16 { return v.Version } +func (v *EndQuorumEpochResponse) IsFlexible() bool { return false } +func (v *EndQuorumEpochResponse) RequestKind() Request { + return &EndQuorumEpochRequest{Version: v.Version} +} + +func (v *EndQuorumEpochResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Topics + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Topic + dst = kbin.AppendString(dst, v) + } + { + v := v.Partitions + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.LeaderID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + } + } + } + } + return dst +} + +func (v *EndQuorumEpochResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *EndQuorumEpochResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *EndQuorumEpochResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + v := s.Topics + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]EndQuorumEpochResponseTopic, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]EndQuorumEpochResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int32() + s.LeaderID = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + } + v = a + s.Partitions = v + } + } + v = a + s.Topics = v + } + return b.Complete() +} + +// NewPtrEndQuorumEpochResponse returns a pointer to a default EndQuorumEpochResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrEndQuorumEpochResponse() *EndQuorumEpochResponse { + var v EndQuorumEpochResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to EndQuorumEpochResponse. +func (v *EndQuorumEpochResponse) Default() { +} + +// NewEndQuorumEpochResponse returns a default EndQuorumEpochResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewEndQuorumEpochResponse() EndQuorumEpochResponse { + var v EndQuorumEpochResponse + v.Default() + return v +} + +// A common struct used in DescribeQuorumResponse. +type DescribeQuorumResponseTopicPartitionReplicaState struct { + ReplicaID int32 + + // The last known log end offset of the follower, or -1 if it is unknown. + LogEndOffset int64 + + // The last known leader wall clock time when a follower fetched from the + // leader, or -1 for the current leader or if unknown for a voter. + // + // This field has a default of -1. + LastFetchTimestamp int64 // v1+ + + // The leader wall clock append time of the offset for which the follower + // made the most recent fetch request, or -1 for the current leader or if + // unknown for a voter. + // + // This field has a default of -1. + LastCaughtUpTimestamp int64 // v1+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeQuorumResponseTopicPartitionReplicaState. +func (v *DescribeQuorumResponseTopicPartitionReplicaState) Default() { + v.LastFetchTimestamp = -1 + v.LastCaughtUpTimestamp = -1 +} + +// NewDescribeQuorumResponseTopicPartitionReplicaState returns a default DescribeQuorumResponseTopicPartitionReplicaState +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeQuorumResponseTopicPartitionReplicaState() DescribeQuorumResponseTopicPartitionReplicaState { + var v DescribeQuorumResponseTopicPartitionReplicaState + v.Default() + return v +} + +type DescribeQuorumRequestTopicPartition struct { + Partition int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeQuorumRequestTopicPartition. +func (v *DescribeQuorumRequestTopicPartition) Default() { +} + +// NewDescribeQuorumRequestTopicPartition returns a default DescribeQuorumRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. 
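As a hedged illustration of the EndQuorumEpoch shapes above, the sketch below fills in a leader resignation with preferred successors. The cluster ID, broker IDs, and the topic name (__cluster_metadata is assumed to be the KRaft metadata log) are made up for the example.

package main

import "github.com/twmb/franz-go/pkg/kmsg"

func main() {
	clusterID := "example-cluster-id" // illustrative

	p := kmsg.NewEndQuorumEpochRequestTopicPartition()
	p.Partition = 0
	p.LeaderID = 1    // the resigning leader
	p.LeaderEpoch = 7 // its current epoch
	p.PreferredSuccessors = []int32{2, 3} // voters to try first, in order

	t := kmsg.NewEndQuorumEpochRequestTopic()
	t.Topic = "__cluster_metadata" // assumed KRaft metadata log topic
	t.Partitions = append(t.Partitions, p)

	req := kmsg.NewPtrEndQuorumEpochRequest()
	req.SetVersion(0)
	req.ClusterID = &clusterID
	req.Topics = append(req.Topics, t)

	// EndQuorumEpoch v0 is not flexible, so AppendTo uses the classic
	// length-prefixed string and array encodings rather than compact ones.
	wire := req.AppendTo(nil)
	_ = wire
}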
+func NewDescribeQuorumRequestTopicPartition() DescribeQuorumRequestTopicPartition { + var v DescribeQuorumRequestTopicPartition + v.Default() + return v +} + +type DescribeQuorumRequestTopic struct { + Topic string + + Partitions []DescribeQuorumRequestTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeQuorumRequestTopic. +func (v *DescribeQuorumRequestTopic) Default() { +} + +// NewDescribeQuorumRequestTopic returns a default DescribeQuorumRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeQuorumRequestTopic() DescribeQuorumRequestTopic { + var v DescribeQuorumRequestTopic + v.Default() + return v +} + +// Part of KIP-642 (and KIP-595) to replace Kafka's dependence on Zookeeper with a +// Kafka-only raft protocol, +// DescribeQuorumRequest is sent by a leader to describe the quorum. +type DescribeQuorumRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + Topics []DescribeQuorumRequestTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*DescribeQuorumRequest) Key() int16 { return 55 } +func (*DescribeQuorumRequest) MaxVersion() int16 { return 1 } +func (v *DescribeQuorumRequest) SetVersion(version int16) { v.Version = version } +func (v *DescribeQuorumRequest) GetVersion() int16 { return v.Version } +func (v *DescribeQuorumRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *DescribeQuorumRequest) IsAdminRequest() {} +func (v *DescribeQuorumRequest) ResponseKind() Response { + r := &DescribeQuorumResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
+func (v *DescribeQuorumRequest) RequestWith(ctx context.Context, r Requestor) (*DescribeQuorumResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DescribeQuorumResponse) + return resp, err +} + +func (v *DescribeQuorumRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeQuorumRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeQuorumRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeQuorumRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeQuorumRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeQuorumRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeQuorumRequest returns a pointer to a default DescribeQuorumRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeQuorumRequest() *DescribeQuorumRequest { + var v DescribeQuorumRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeQuorumRequest. +func (v *DescribeQuorumRequest) Default() { +} + +// NewDescribeQuorumRequest returns a default DescribeQuorumRequest +// This is a shortcut for creating a struct and calling Default yourself. 
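RequestWith only needs something that satisfies the Requestor interface; a franz-go *kgo.Client is assumed to be one such implementation, though none is constructed in this patch. A sketch of issuing DescribeQuorum that way, with the topic name assumed to be the KRaft metadata log:

package quorum

import (
	"context"

	"github.com/twmb/franz-go/pkg/kmsg"
)

// describeMetadataQuorum issues a DescribeQuorum request through any Requestor.
func describeMetadataQuorum(ctx context.Context, r kmsg.Requestor) (*kmsg.DescribeQuorumResponse, error) {
	part := kmsg.NewDescribeQuorumRequestTopicPartition()
	part.Partition = 0

	topic := kmsg.NewDescribeQuorumRequestTopic()
	topic.Topic = "__cluster_metadata" // assumed KRaft metadata log topic
	topic.Partitions = append(topic.Partitions, part)

	req := kmsg.NewPtrDescribeQuorumRequest()
	req.SetVersion(1) // v1 adds the fetch / caught-up timestamps on replica state
	req.Topics = append(req.Topics, topic)

	return req.RequestWith(ctx, r)
}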
+func NewDescribeQuorumRequest() DescribeQuorumRequest { + var v DescribeQuorumRequest + v.Default() + return v +} + +type DescribeQuorumResponseTopicPartition struct { + Partition int32 + + ErrorCode int16 + + // The ID of the current leader, or -1 if the leader is unknown. + LeaderID int32 + + // The latest known leader epoch. + LeaderEpoch int32 + + HighWatermark int64 + + CurrentVoters []DescribeQuorumResponseTopicPartitionReplicaState + + Observers []DescribeQuorumResponseTopicPartitionReplicaState + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeQuorumResponseTopicPartition. +func (v *DescribeQuorumResponseTopicPartition) Default() { +} + +// NewDescribeQuorumResponseTopicPartition returns a default DescribeQuorumResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeQuorumResponseTopicPartition() DescribeQuorumResponseTopicPartition { + var v DescribeQuorumResponseTopicPartition + v.Default() + return v +} + +type DescribeQuorumResponseTopic struct { + Topic string + + Partitions []DescribeQuorumResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeQuorumResponseTopic. +func (v *DescribeQuorumResponseTopic) Default() { +} + +// NewDescribeQuorumResponseTopic returns a default DescribeQuorumResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeQuorumResponseTopic() DescribeQuorumResponseTopic { + var v DescribeQuorumResponseTopic + v.Default() + return v +} + +type DescribeQuorumResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + ErrorCode int16 + + Topics []DescribeQuorumResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags +} + +func (*DescribeQuorumResponse) Key() int16 { return 55 } +func (*DescribeQuorumResponse) MaxVersion() int16 { return 1 } +func (v *DescribeQuorumResponse) SetVersion(version int16) { v.Version = version } +func (v *DescribeQuorumResponse) GetVersion() int16 { return v.Version } +func (v *DescribeQuorumResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *DescribeQuorumResponse) RequestKind() Request { + return &DescribeQuorumRequest{Version: v.Version} +} + +func (v *DescribeQuorumResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.LeaderID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.HighWatermark + dst = kbin.AppendInt64(dst, v) + } + { + v := v.CurrentVoters + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ReplicaID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LogEndOffset + dst = kbin.AppendInt64(dst, v) + } + if version >= 1 { + v := v.LastFetchTimestamp + dst = kbin.AppendInt64(dst, v) + } + if version >= 1 { + v := v.LastCaughtUpTimestamp + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.Observers + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ReplicaID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LogEndOffset + dst = kbin.AppendInt64(dst, v) + } + if version >= 1 { + v := v.LastFetchTimestamp + dst = kbin.AppendInt64(dst, v) + } + if version >= 1 { + v := v.LastCaughtUpTimestamp + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeQuorumResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeQuorumResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeQuorumResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int16() + 
s.ErrorCode = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeQuorumResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeQuorumResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int32() + s.LeaderID = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + { + v := b.Int64() + s.HighWatermark = v + } + { + v := s.CurrentVoters + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeQuorumResponseTopicPartitionReplicaState, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.ReplicaID = v + } + { + v := b.Int64() + s.LogEndOffset = v + } + if version >= 1 { + v := b.Int64() + s.LastFetchTimestamp = v + } + if version >= 1 { + v := b.Int64() + s.LastCaughtUpTimestamp = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.CurrentVoters = v + } + { + v := s.Observers + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeQuorumResponseTopicPartitionReplicaState, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.ReplicaID = v + } + { + v := b.Int64() + s.LogEndOffset = v + } + if version >= 1 { + v := b.Int64() + s.LastFetchTimestamp = v + } + if version >= 1 { + v := b.Int64() + s.LastCaughtUpTimestamp = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Observers = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeQuorumResponse returns a pointer to a default DescribeQuorumResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeQuorumResponse() *DescribeQuorumResponse { + var v DescribeQuorumResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeQuorumResponse. +func (v *DescribeQuorumResponse) Default() { +} + +// NewDescribeQuorumResponse returns a default DescribeQuorumResponse +// This is a shortcut for creating a struct and calling Default yourself. 
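Reading the response back out is plain struct traversal. A sketch, assuming a response obtained as in the previous example:

package quorum

import (
	"fmt"

	"github.com/twmb/franz-go/pkg/kmsg"
)

// printQuorumState walks a DescribeQuorumResponse and prints per-partition
// leader state and voter progress.
func printQuorumState(resp *kmsg.DescribeQuorumResponse) {
	for _, t := range resp.Topics {
		for _, p := range t.Partitions {
			fmt.Printf("%s[%d]: leader %d, epoch %d, high watermark %d\n",
				t.Topic, p.Partition, p.LeaderID, p.LeaderEpoch, p.HighWatermark)
			for _, v := range p.CurrentVoters {
				// LastFetchTimestamp / LastCaughtUpTimestamp are v1+ fields
				// and default to -1 when unknown or for the current leader.
				fmt.Printf("  voter %d: log end offset %d, last fetch %d\n",
					v.ReplicaID, v.LogEndOffset, v.LastFetchTimestamp)
			}
			for _, o := range p.Observers {
				fmt.Printf("  observer %d: log end offset %d\n", o.ReplicaID, o.LogEndOffset)
			}
		}
	}
}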
+func NewDescribeQuorumResponse() DescribeQuorumResponse { + var v DescribeQuorumResponse + v.Default() + return v +} + +type AlterPartitionRequestTopicPartitionNewEpochISR struct { + // The broker ID . + BrokerID int32 + + // The broker's epoch; -1 if the epoch check is not supported. + // + // This field has a default of -1. + BrokerEpoch int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterPartitionRequestTopicPartitionNewEpochISR. +func (v *AlterPartitionRequestTopicPartitionNewEpochISR) Default() { + v.BrokerEpoch = -1 +} + +// NewAlterPartitionRequestTopicPartitionNewEpochISR returns a default AlterPartitionRequestTopicPartitionNewEpochISR +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterPartitionRequestTopicPartitionNewEpochISR() AlterPartitionRequestTopicPartitionNewEpochISR { + var v AlterPartitionRequestTopicPartitionNewEpochISR + v.Default() + return v +} + +type AlterPartitionRequestTopicPartition struct { + Partition int32 + + // The leader epoch of this partition. + LeaderEpoch int32 + + // The ISR for this partition. + NewISR []int32 // v0-v2 + + NewEpochISR []AlterPartitionRequestTopicPartitionNewEpochISR // v3+ + + // 1 if the partition is recovering from unclean leader election; 0 otherwise + LeaderRecoveryState int8 // v1+ + + // The expected epoch of the partition which is being updated. + // For a legacy cluster, this is the ZkVersion in the LeaderAndISR request. + PartitionEpoch int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterPartitionRequestTopicPartition. +func (v *AlterPartitionRequestTopicPartition) Default() { +} + +// NewAlterPartitionRequestTopicPartition returns a default AlterPartitionRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterPartitionRequestTopicPartition() AlterPartitionRequestTopicPartition { + var v AlterPartitionRequestTopicPartition + v.Default() + return v +} + +type AlterPartitionRequestTopic struct { + Topic string // v0-v1 + + TopicID [16]byte // v2+ + + Partitions []AlterPartitionRequestTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterPartitionRequestTopic. +func (v *AlterPartitionRequestTopic) Default() { +} + +// NewAlterPartitionRequestTopic returns a default AlterPartitionRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterPartitionRequestTopic() AlterPartitionRequestTopic { + var v AlterPartitionRequestTopic + v.Default() + return v +} + +// AlterPartitionRequest, proposed in KIP-497 and introduced in Kafka 2.7.0, +// is an admin request to modify ISR. +// +// Version 3 was added for KIP-903 and replaced NewISR. +type AlterPartitionRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The ID of the requesting broker. + BrokerID int32 + + // The epoch of the requesting broker. + // + // This field has a default of -1. 
+ BrokerEpoch int64 + + Topics []AlterPartitionRequestTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*AlterPartitionRequest) Key() int16 { return 56 } +func (*AlterPartitionRequest) MaxVersion() int16 { return 3 } +func (v *AlterPartitionRequest) SetVersion(version int16) { v.Version = version } +func (v *AlterPartitionRequest) GetVersion() int16 { return v.Version } +func (v *AlterPartitionRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *AlterPartitionRequest) IsAdminRequest() {} +func (v *AlterPartitionRequest) ResponseKind() Response { + r := &AlterPartitionResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *AlterPartitionRequest) RequestWith(ctx context.Context, r Requestor) (*AlterPartitionResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*AlterPartitionResponse) + return resp, err +} + +func (v *AlterPartitionRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.BrokerID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.BrokerEpoch + dst = kbin.AppendInt64(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + if version >= 0 && version <= 1 { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 2 { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + if version >= 0 && version <= 2 { + v := v.NewISR + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if version >= 3 { + v := v.NewEpochISR + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.BrokerID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.BrokerEpoch + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 1 { + v := v.LeaderRecoveryState + dst = kbin.AppendInt8(dst, v) + } + { + v := v.PartitionEpoch + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AlterPartitionRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AlterPartitionRequest) UnsafeReadFrom(src 
[]byte) error { + return v.readFrom(src, true) +} + +func (v *AlterPartitionRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.BrokerID = v + } + { + v := b.Int64() + s.BrokerEpoch = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterPartitionRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + if version >= 0 && version <= 1 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + if version >= 2 { + v := b.Uuid() + s.TopicID = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterPartitionRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + if version >= 0 && version <= 2 { + v := s.NewISR + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.NewISR = v + } + if version >= 3 { + v := s.NewEpochISR + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterPartitionRequestTopicPartitionNewEpochISR, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.BrokerID = v + } + { + v := b.Int32() + s.BrokerEpoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.NewEpochISR = v + } + if version >= 1 { + v := b.Int8() + s.LeaderRecoveryState = v + } + { + v := b.Int32() + s.PartitionEpoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAlterPartitionRequest returns a pointer to a default AlterPartitionRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAlterPartitionRequest() *AlterPartitionRequest { + var v AlterPartitionRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterPartitionRequest. +func (v *AlterPartitionRequest) Default() { + v.BrokerEpoch = -1 +} + +// NewAlterPartitionRequest returns a default AlterPartitionRequest +// This is a shortcut for creating a struct and calling Default yourself. 
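For orientation, here is a hedged sketch of how the KIP-497 AlterPartition definitions above might be used. It is not part of this change; the import path, the chosen version, and all field values are assumptions for illustration only.

package example

import (
	"github.com/twmb/franz-go/pkg/kmsg"
)

// buildAlterPartition builds a minimal AlterPartitionRequest and serializes
// its body with the generated AppendTo. Illustrative sketch only.
func buildAlterPartition(brokerID int32, topicID [16]byte) []byte {
	req := kmsg.NewPtrAlterPartitionRequest()
	req.SetVersion(3) // v3 uses TopicID and NewEpochISR instead of Topic/NewISR

	part := kmsg.NewAlterPartitionRequestTopicPartition()
	part.Partition = 0
	part.LeaderEpoch = 5
	part.NewEpochISR = []kmsg.AlterPartitionRequestTopicPartitionNewEpochISR{
		{BrokerID: brokerID, BrokerEpoch: -1}, // -1: epoch check not supported
	}

	topic := kmsg.NewAlterPartitionRequestTopic()
	topic.TopicID = topicID
	topic.Partitions = []kmsg.AlterPartitionRequestTopicPartition{part}

	req.BrokerID = brokerID
	req.Topics = []kmsg.AlterPartitionRequestTopic{topic}

	// AppendTo writes only the request body; the outer framing (length,
	// api key, version, correlation id) is added elsewhere.
	return req.AppendTo(nil)
}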
+func NewAlterPartitionRequest() AlterPartitionRequest { + var v AlterPartitionRequest + v.Default() + return v +} + +type AlterPartitionResponseTopicPartition struct { + Partition int32 + + ErrorCode int16 + + // The broker ID of the leader. + LeaderID int32 + + // The leader epoch of this partition. + LeaderEpoch int32 + + // The in-sync replica ids. + ISR []int32 + + // 1 if the partition is recovering from unclean leader election; 0 otherwise + LeaderRecoveryState int8 // v1+ + + // The current epoch of the partition for KRaft controllers. + // The current ZK version for legacy controllers. + PartitionEpoch int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterPartitionResponseTopicPartition. +func (v *AlterPartitionResponseTopicPartition) Default() { +} + +// NewAlterPartitionResponseTopicPartition returns a default AlterPartitionResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterPartitionResponseTopicPartition() AlterPartitionResponseTopicPartition { + var v AlterPartitionResponseTopicPartition + v.Default() + return v +} + +type AlterPartitionResponseTopic struct { + Topic string // v0-v1 + + TopidID [16]byte // v2+ + + Partitions []AlterPartitionResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterPartitionResponseTopic. +func (v *AlterPartitionResponseTopic) Default() { +} + +// NewAlterPartitionResponseTopic returns a default AlterPartitionResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterPartitionResponseTopic() AlterPartitionResponseTopic { + var v AlterPartitionResponseTopic + v.Default() + return v +} + +type AlterPartitionResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + ErrorCode int16 + + Topics []AlterPartitionResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags +} + +func (*AlterPartitionResponse) Key() int16 { return 56 } +func (*AlterPartitionResponse) MaxVersion() int16 { return 3 } +func (v *AlterPartitionResponse) SetVersion(version int16) { v.Version = version } +func (v *AlterPartitionResponse) GetVersion() int16 { return v.Version } +func (v *AlterPartitionResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *AlterPartitionResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 0 } +func (v *AlterPartitionResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *AlterPartitionResponse) RequestKind() Request { + return &AlterPartitionRequest{Version: v.Version} +} + +func (v *AlterPartitionResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + if version >= 0 && version <= 1 { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 2 { + v := v.TopidID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.LeaderID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ISR + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if version >= 1 { + v := v.LeaderRecoveryState + dst = kbin.AppendInt8(dst, v) + } + { + v := v.PartitionEpoch + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AlterPartitionResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AlterPartitionResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AlterPartitionResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterPartitionResponseTopic, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + if version >= 0 && version <= 1 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + if version >= 2 { + v := b.Uuid() + s.TopidID = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterPartitionResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int32() + s.LeaderID = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + { + v := s.ISR + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.ISR = v + } + if version >= 1 { + v := b.Int8() + s.LeaderRecoveryState = v + } + { + v := b.Int32() + s.PartitionEpoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAlterPartitionResponse returns a pointer to a default AlterPartitionResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAlterPartitionResponse() *AlterPartitionResponse { + var v AlterPartitionResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterPartitionResponse. +func (v *AlterPartitionResponse) Default() { +} + +// NewAlterPartitionResponse returns a default AlterPartitionResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterPartitionResponse() AlterPartitionResponse { + var v AlterPartitionResponse + v.Default() + return v +} + +type UpdateFeaturesRequestFeatureUpdate struct { + // The name of the finalized feature to update. + Feature string + + // The new maximum version level for the finalized feature. A value >= 1 is + // valid. A value < 1, is special, and can be used to request the deletion + // of the finalized feature. + MaxVersionLevel int16 + + // When set to true, the finalized feature version level is allowed to be + // downgraded/deleted. The downgrade request will fail if the new maximum + // version level is a value that's not lower than the existing maximum + // finalized version level. + // + // Replaced in v1 with ValidateOnly. + AllowDowngrade bool + + // Determine which type of upgrade will be performed: 1 will perform an + // upgrade only (default), 2 is safe downgrades only (lossless), 3 is + // unsafe downgrades (lossy). + UpgradeType int8 // v1+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to UpdateFeaturesRequestFeatureUpdate. 
+func (v *UpdateFeaturesRequestFeatureUpdate) Default() { +} + +// NewUpdateFeaturesRequestFeatureUpdate returns a default UpdateFeaturesRequestFeatureUpdate +// This is a shortcut for creating a struct and calling Default yourself. +func NewUpdateFeaturesRequestFeatureUpdate() UpdateFeaturesRequestFeatureUpdate { + var v UpdateFeaturesRequestFeatureUpdate + v.Default() + return v +} + +// From KIP-584 and introduced in 2.7.0, this request updates broker-wide features. +type UpdateFeaturesRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // TimeoutMillis is how long Kafka can wait before responding to this request. + // This field has no effect on Kafka's processing of the request; the request + // will continue to be processed if the timeout is reached. If the timeout is + // reached, Kafka will reply with a REQUEST_TIMED_OUT error. + // + // This field has a default of 60000. + TimeoutMillis int32 + + // The list of updates to finalized features. + FeatureUpdates []UpdateFeaturesRequestFeatureUpdate + + // True if we should validate the request, but not perform the upgrade or + // downgrade. + ValidateOnly bool // v1+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*UpdateFeaturesRequest) Key() int16 { return 57 } +func (*UpdateFeaturesRequest) MaxVersion() int16 { return 1 } +func (v *UpdateFeaturesRequest) SetVersion(version int16) { v.Version = version } +func (v *UpdateFeaturesRequest) GetVersion() int16 { return v.Version } +func (v *UpdateFeaturesRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *UpdateFeaturesRequest) Timeout() int32 { return v.TimeoutMillis } +func (v *UpdateFeaturesRequest) SetTimeout(timeoutMillis int32) { v.TimeoutMillis = timeoutMillis } +func (v *UpdateFeaturesRequest) IsAdminRequest() {} +func (v *UpdateFeaturesRequest) ResponseKind() Response { + r := &UpdateFeaturesResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
+func (v *UpdateFeaturesRequest) RequestWith(ctx context.Context, r Requestor) (*UpdateFeaturesResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*UpdateFeaturesResponse) + return resp, err +} + +func (v *UpdateFeaturesRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.TimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.FeatureUpdates + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Feature + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.MaxVersionLevel + dst = kbin.AppendInt16(dst, v) + } + if version >= 0 && version <= 0 { + v := v.AllowDowngrade + dst = kbin.AppendBool(dst, v) + } + if version >= 1 { + v := v.UpgradeType + dst = kbin.AppendInt8(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 1 { + v := v.ValidateOnly + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *UpdateFeaturesRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *UpdateFeaturesRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *UpdateFeaturesRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.TimeoutMillis = v + } + { + v := s.FeatureUpdates + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]UpdateFeaturesRequestFeatureUpdate, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Feature = v + } + { + v := b.Int16() + s.MaxVersionLevel = v + } + if version >= 0 && version <= 0 { + v := b.Bool() + s.AllowDowngrade = v + } + if version >= 1 { + v := b.Int8() + s.UpgradeType = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.FeatureUpdates = v + } + if version >= 1 { + v := b.Bool() + s.ValidateOnly = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrUpdateFeaturesRequest returns a pointer to a default UpdateFeaturesRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrUpdateFeaturesRequest() *UpdateFeaturesRequest { + var v UpdateFeaturesRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to UpdateFeaturesRequest. +func (v *UpdateFeaturesRequest) Default() { + v.TimeoutMillis = 60000 +} + +// NewUpdateFeaturesRequest returns a default UpdateFeaturesRequest +// This is a shortcut for creating a struct and calling Default yourself. 
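A hedged usage sketch for the KIP-584 UpdateFeatures definitions above (not part of this change; the import path, the Requestor value, and the feature name are illustrative assumptions):

package example

import (
	"context"

	"github.com/twmb/franz-go/pkg/kmsg"
)

// validateFeatureUpgrade issues a dry-run UpdateFeaturesRequest via the
// generated RequestWith helper, where r is any implementation of the
// package's Requestor interface. Illustrative sketch only.
func validateFeatureUpgrade(ctx context.Context, r kmsg.Requestor) (int16, error) {
	req := kmsg.NewPtrUpdateFeaturesRequest() // TimeoutMillis defaults to 60000
	req.SetVersion(1)
	req.ValidateOnly = true // v1+: check the update without applying it

	upd := kmsg.NewUpdateFeaturesRequestFeatureUpdate()
	upd.Feature = "metadata.version" // example feature name
	upd.MaxVersionLevel = 7
	upd.UpgradeType = 1 // 1 = upgrade only
	req.FeatureUpdates = []kmsg.UpdateFeaturesRequestFeatureUpdate{upd}

	resp, err := req.RequestWith(ctx, r)
	if err != nil {
		return 0, err
	}
	return resp.ErrorCode, nil
}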
+func NewUpdateFeaturesRequest() UpdateFeaturesRequest { + var v UpdateFeaturesRequest + v.Default() + return v +} + +type UpdateFeaturesResponseResult struct { + // The name of the finalized feature. + Feature string + + // The feature update error code, if any. + ErrorCode int16 + + // The feature update error, if any. + ErrorMessage *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to UpdateFeaturesResponseResult. +func (v *UpdateFeaturesResponseResult) Default() { +} + +// NewUpdateFeaturesResponseResult returns a default UpdateFeaturesResponseResult +// This is a shortcut for creating a struct and calling Default yourself. +func NewUpdateFeaturesResponseResult() UpdateFeaturesResponseResult { + var v UpdateFeaturesResponseResult + v.Default() + return v +} + +type UpdateFeaturesResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // The top level error code, if any. + ErrorCode int16 + + // An informative message if the request errored, if any. + ErrorMessage *string + + // The results for each feature update request. + Results []UpdateFeaturesResponseResult + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*UpdateFeaturesResponse) Key() int16 { return 57 } +func (*UpdateFeaturesResponse) MaxVersion() int16 { return 1 } +func (v *UpdateFeaturesResponse) SetVersion(version int16) { v.Version = version } +func (v *UpdateFeaturesResponse) GetVersion() int16 { return v.Version } +func (v *UpdateFeaturesResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *UpdateFeaturesResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 0 } +func (v *UpdateFeaturesResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *UpdateFeaturesResponse) RequestKind() Request { + return &UpdateFeaturesRequest{Version: v.Version} +} + +func (v *UpdateFeaturesResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Results + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Feature + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *UpdateFeaturesResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v 
*UpdateFeaturesResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *UpdateFeaturesResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + v := s.Results + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]UpdateFeaturesResponseResult, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Feature = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Results = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrUpdateFeaturesResponse returns a pointer to a default UpdateFeaturesResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrUpdateFeaturesResponse() *UpdateFeaturesResponse { + var v UpdateFeaturesResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to UpdateFeaturesResponse. +func (v *UpdateFeaturesResponse) Default() { +} + +// NewUpdateFeaturesResponse returns a default UpdateFeaturesResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewUpdateFeaturesResponse() UpdateFeaturesResponse { + var v UpdateFeaturesResponse + v.Default() + return v +} + +// Introduced for KIP-590, EnvelopeRequest is what brokers use to wrap an +// incoming request before forwarding it to another broker. +type EnvelopeRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The embedded request header and data. + RequestData []byte + + // Value of the initial client principal when the request is redirected by a broker. + RequestPrincipal []byte + + // The original client's address in bytes. + ClientHostAddress []byte + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags +} + +func (*EnvelopeRequest) Key() int16 { return 58 } +func (*EnvelopeRequest) MaxVersion() int16 { return 0 } +func (v *EnvelopeRequest) SetVersion(version int16) { v.Version = version } +func (v *EnvelopeRequest) GetVersion() int16 { return v.Version } +func (v *EnvelopeRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *EnvelopeRequest) IsAdminRequest() {} +func (v *EnvelopeRequest) ResponseKind() Response { + r := &EnvelopeResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *EnvelopeRequest) RequestWith(ctx context.Context, r Requestor) (*EnvelopeResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*EnvelopeResponse) + return resp, err +} + +func (v *EnvelopeRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.RequestData + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + { + v := v.RequestPrincipal + if isFlexible { + dst = kbin.AppendCompactNullableBytes(dst, v) + } else { + dst = kbin.AppendNullableBytes(dst, v) + } + } + { + v := v.ClientHostAddress + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *EnvelopeRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *EnvelopeRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *EnvelopeRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.RequestData = v + } + { + var v []byte + if isFlexible { + v = b.CompactNullableBytes() + } else { + v = b.NullableBytes() + } + s.RequestPrincipal = v + } + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.ClientHostAddress = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrEnvelopeRequest returns a pointer to a default EnvelopeRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrEnvelopeRequest() *EnvelopeRequest { + var v EnvelopeRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to EnvelopeRequest. +func (v *EnvelopeRequest) Default() { +} + +// NewEnvelopeRequest returns a default EnvelopeRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewEnvelopeRequest() EnvelopeRequest { + var v EnvelopeRequest + v.Default() + return v +} + +type EnvelopeResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The embedded response header and data. + ResponseData []byte + + // The error code, or 0 if there was no error. + // + // NOT_CONTROLLER is returned when the request is not sent to the controller. 
+ // + // CLUSTER_AUTHORIZATION_FAILED is returned if inter-broker authorization failed. + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*EnvelopeResponse) Key() int16 { return 58 } +func (*EnvelopeResponse) MaxVersion() int16 { return 0 } +func (v *EnvelopeResponse) SetVersion(version int16) { v.Version = version } +func (v *EnvelopeResponse) GetVersion() int16 { return v.Version } +func (v *EnvelopeResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *EnvelopeResponse) RequestKind() Request { return &EnvelopeRequest{Version: v.Version} } + +func (v *EnvelopeResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ResponseData + if isFlexible { + dst = kbin.AppendCompactNullableBytes(dst, v) + } else { + dst = kbin.AppendNullableBytes(dst, v) + } + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *EnvelopeResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *EnvelopeResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *EnvelopeResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + var v []byte + if isFlexible { + v = b.CompactNullableBytes() + } else { + v = b.NullableBytes() + } + s.ResponseData = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrEnvelopeResponse returns a pointer to a default EnvelopeResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrEnvelopeResponse() *EnvelopeResponse { + var v EnvelopeResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to EnvelopeResponse. +func (v *EnvelopeResponse) Default() { +} + +// NewEnvelopeResponse returns a default EnvelopeResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewEnvelopeResponse() EnvelopeResponse { + var v EnvelopeResponse + v.Default() + return v +} + +type FetchSnapshotRequestTopicPartitionSnapshotID struct { + EndOffset int64 + + Epoch int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchSnapshotRequestTopicPartitionSnapshotID. +func (v *FetchSnapshotRequestTopicPartitionSnapshotID) Default() { +} + +// NewFetchSnapshotRequestTopicPartitionSnapshotID returns a default FetchSnapshotRequestTopicPartitionSnapshotID +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchSnapshotRequestTopicPartitionSnapshotID() FetchSnapshotRequestTopicPartitionSnapshotID { + var v FetchSnapshotRequestTopicPartitionSnapshotID + v.Default() + return v +} + +type FetchSnapshotRequestTopicPartition struct { + // The partition to fetch. + Partition int32 + + // The current leader epoch of the partition, or -1 for an unknown leader epoch. + CurrentLeaderEpoch int32 + + // The snapshot end offset and epoch to fetch. 
+ SnapshotID FetchSnapshotRequestTopicPartitionSnapshotID + + // The byte position within the snapshot to start fetching from. + Position int64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchSnapshotRequestTopicPartition. +func (v *FetchSnapshotRequestTopicPartition) Default() { + { + v := &v.SnapshotID + _ = v + } +} + +// NewFetchSnapshotRequestTopicPartition returns a default FetchSnapshotRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchSnapshotRequestTopicPartition() FetchSnapshotRequestTopicPartition { + var v FetchSnapshotRequestTopicPartition + v.Default() + return v +} + +type FetchSnapshotRequestTopic struct { + // The name of the topic to fetch. + Topic string + + // The partitions to fetch. + Partitions []FetchSnapshotRequestTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchSnapshotRequestTopic. +func (v *FetchSnapshotRequestTopic) Default() { +} + +// NewFetchSnapshotRequestTopic returns a default FetchSnapshotRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchSnapshotRequestTopic() FetchSnapshotRequestTopic { + var v FetchSnapshotRequestTopic + v.Default() + return v +} + +// Introduced for KIP-630, FetchSnapshotRequest is a part of the inter-Kafka +// raft protocol to remove the dependency on Zookeeper. +type FetchSnapshotRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The ClusterID if known, this is used to validate metadata fetches prior to + // broker registration. + ClusterID *string // tag 0 + + // The broker ID of the follower. + // + // This field has a default of -1. + ReplicaID int32 + + // The maximum bytes to fetch from all of the snapshots. + // + // This field has a default of 0x7fffffff. + MaxBytes int32 + + // The topics to fetch. + Topics []FetchSnapshotRequestTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*FetchSnapshotRequest) Key() int16 { return 59 } +func (*FetchSnapshotRequest) MaxVersion() int16 { return 0 } +func (v *FetchSnapshotRequest) SetVersion(version int16) { v.Version = version } +func (v *FetchSnapshotRequest) GetVersion() int16 { return v.Version } +func (v *FetchSnapshotRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *FetchSnapshotRequest) ResponseKind() Response { + r := &FetchSnapshotResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
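As a hedged sketch of how the KIP-630 FetchSnapshot definitions above could be assembled (not part of this change; the import path and the concrete offsets are assumptions for illustration):

package example

import (
	"github.com/twmb/franz-go/pkg/kmsg"
)

// buildFetchSnapshot assembles a single-partition FetchSnapshotRequest,
// relying on the defaults set by the generated constructors
// (ReplicaID -1, MaxBytes 0x7fffffff). Illustrative sketch only.
func buildFetchSnapshot(topic string, endOffset int64, epoch int32) []byte {
	part := kmsg.NewFetchSnapshotRequestTopicPartition()
	part.Partition = 0
	part.CurrentLeaderEpoch = epoch
	part.SnapshotID.EndOffset = endOffset
	part.SnapshotID.Epoch = epoch
	part.Position = 0 // fetch from the start of the snapshot

	t := kmsg.NewFetchSnapshotRequestTopic()
	t.Topic = topic
	t.Partitions = []kmsg.FetchSnapshotRequestTopicPartition{part}

	req := kmsg.NewPtrFetchSnapshotRequest()
	req.SetVersion(0)
	req.Topics = []kmsg.FetchSnapshotRequestTopic{t}
	return req.AppendTo(nil)
}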
+func (v *FetchSnapshotRequest) RequestWith(ctx context.Context, r Requestor) (*FetchSnapshotResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*FetchSnapshotResponse) + return resp, err +} + +func (v *FetchSnapshotRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ReplicaID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.MaxBytes + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.CurrentLeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := &v.SnapshotID + { + v := v.EndOffset + dst = kbin.AppendInt64(dst, v) + } + { + v := v.Epoch + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + { + v := v.Position + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + var toEncode []uint32 + if v.ClusterID != nil { + toEncode = append(toEncode, 0) + } + dst = kbin.AppendUvarint(dst, uint32(len(toEncode)+v.UnknownTags.Len())) + for _, tag := range toEncode { + switch tag { + case 0: + { + v := v.ClusterID + dst = kbin.AppendUvarint(dst, 0) + sized := false + lenAt := len(dst) + fClusterID: + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + if !sized { + dst = kbin.AppendUvarint(dst[:lenAt], uint32(len(dst[lenAt:]))) + sized = true + goto fClusterID + } + } + } + } + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *FetchSnapshotRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *FetchSnapshotRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *FetchSnapshotRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ReplicaID = v + } + { + v := b.Int32() + s.MaxBytes = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]FetchSnapshotRequestTopic, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]FetchSnapshotRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int32() + s.CurrentLeaderEpoch = v + } + { + v := &s.SnapshotID + v.Default() + s := v + { + v := b.Int64() + s.EndOffset = v + } + { + v := b.Int32() + s.Epoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + { + v := b.Int64() + s.Position = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + for i := b.Uvarint(); i > 0; i-- { + switch key := b.Uvarint(); key { + default: + s.UnknownTags.Set(key, b.Span(int(b.Uvarint()))) + case 0: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ClusterID = v + if err := b.Complete(); err != nil { + return err + } + } + } + } + return b.Complete() +} + +// NewPtrFetchSnapshotRequest returns a pointer to a default FetchSnapshotRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrFetchSnapshotRequest() *FetchSnapshotRequest { + var v FetchSnapshotRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchSnapshotRequest. +func (v *FetchSnapshotRequest) Default() { + v.ReplicaID = -1 + v.MaxBytes = 2147483647 +} + +// NewFetchSnapshotRequest returns a default FetchSnapshotRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchSnapshotRequest() FetchSnapshotRequest { + var v FetchSnapshotRequest + v.Default() + return v +} + +type FetchSnapshotResponseTopicPartitionSnapshotID struct { + EndOffset int64 + + Epoch int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchSnapshotResponseTopicPartitionSnapshotID. +func (v *FetchSnapshotResponseTopicPartitionSnapshotID) Default() { +} + +// NewFetchSnapshotResponseTopicPartitionSnapshotID returns a default FetchSnapshotResponseTopicPartitionSnapshotID +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchSnapshotResponseTopicPartitionSnapshotID() FetchSnapshotResponseTopicPartitionSnapshotID { + var v FetchSnapshotResponseTopicPartitionSnapshotID + v.Default() + return v +} + +type FetchSnapshotResponseTopicPartitionCurrentLeader struct { + LeaderID int32 + + LeaderEpoch int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. 
Calling this allows for future compatibility +// if new fields are added to FetchSnapshotResponseTopicPartitionCurrentLeader. +func (v *FetchSnapshotResponseTopicPartitionCurrentLeader) Default() { +} + +// NewFetchSnapshotResponseTopicPartitionCurrentLeader returns a default FetchSnapshotResponseTopicPartitionCurrentLeader +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchSnapshotResponseTopicPartitionCurrentLeader() FetchSnapshotResponseTopicPartitionCurrentLeader { + var v FetchSnapshotResponseTopicPartitionCurrentLeader + v.Default() + return v +} + +type FetchSnapshotResponseTopicPartition struct { + // The partition. + Partition int32 + + // An error code, or 0 if there was no fetch error. + ErrorCode int16 + + // The snapshot end offset and epoch to fetch. + SnapshotID FetchSnapshotResponseTopicPartitionSnapshotID + + // The ID of the current leader (or -1 if unknown) and the latest known + // leader epoch. + CurrentLeader FetchSnapshotResponseTopicPartitionCurrentLeader // tag 0 + + // The total size of the snapshot. + Size int64 + + // The starting byte position within the snapshot included in the Bytes + // field. + Position int64 + + // Snapshot data. + Bytes []byte + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchSnapshotResponseTopicPartition. +func (v *FetchSnapshotResponseTopicPartition) Default() { + { + v := &v.SnapshotID + _ = v + } + { + v := &v.CurrentLeader + _ = v + } +} + +// NewFetchSnapshotResponseTopicPartition returns a default FetchSnapshotResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchSnapshotResponseTopicPartition() FetchSnapshotResponseTopicPartition { + var v FetchSnapshotResponseTopicPartition + v.Default() + return v +} + +type FetchSnapshotResponseTopic struct { + // The name of the topic to fetch. + Topic string + + // The partitions to fetch. + Partitions []FetchSnapshotResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchSnapshotResponseTopic. +func (v *FetchSnapshotResponseTopic) Default() { +} + +// NewFetchSnapshotResponseTopic returns a default FetchSnapshotResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchSnapshotResponseTopic() FetchSnapshotResponseTopic { + var v FetchSnapshotResponseTopic + v.Default() + return v +} + +// FetchSnapshotResponse is a response for a FetchSnapshotRequest. +type FetchSnapshotResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // The top level response error code. + ErrorCode int16 + + // The topics to fetch. + Topics []FetchSnapshotResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags +} + +func (*FetchSnapshotResponse) Key() int16 { return 59 } +func (*FetchSnapshotResponse) MaxVersion() int16 { return 0 } +func (v *FetchSnapshotResponse) SetVersion(version int16) { v.Version = version } +func (v *FetchSnapshotResponse) GetVersion() int16 { return v.Version } +func (v *FetchSnapshotResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *FetchSnapshotResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 0 } +func (v *FetchSnapshotResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *FetchSnapshotResponse) RequestKind() Request { + return &FetchSnapshotRequest{Version: v.Version} +} + +func (v *FetchSnapshotResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := &v.SnapshotID + { + v := v.EndOffset + dst = kbin.AppendInt64(dst, v) + } + { + v := v.Epoch + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + { + v := v.Size + dst = kbin.AppendInt64(dst, v) + } + { + v := v.Position + dst = kbin.AppendInt64(dst, v) + } + { + v := v.Bytes + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + if isFlexible { + var toEncode []uint32 + if !reflect.DeepEqual(v.CurrentLeader, (func() FetchSnapshotResponseTopicPartitionCurrentLeader { + var v FetchSnapshotResponseTopicPartitionCurrentLeader + v.Default() + return v + })()) { + toEncode = append(toEncode, 0) + } + dst = kbin.AppendUvarint(dst, uint32(len(toEncode)+v.UnknownTags.Len())) + for _, tag := range toEncode { + switch tag { + case 0: + { + v := v.CurrentLeader + dst = kbin.AppendUvarint(dst, 0) + sized := false + lenAt := len(dst) + fCurrentLeader: + { + v := v.LeaderID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + if !sized { + dst = kbin.AppendUvarint(dst[:lenAt], uint32(len(dst[lenAt:]))) + sized = true + goto fCurrentLeader + } + } + } + } + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *FetchSnapshotResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *FetchSnapshotResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *FetchSnapshotResponse) readFrom(src 
[]byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]FetchSnapshotResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]FetchSnapshotResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := &s.SnapshotID + v.Default() + s := v + { + v := b.Int64() + s.EndOffset = v + } + { + v := b.Int32() + s.Epoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + { + v := b.Int64() + s.Size = v + } + { + v := b.Int64() + s.Position = v + } + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.Bytes = v + } + if isFlexible { + for i := b.Uvarint(); i > 0; i-- { + switch key := b.Uvarint(); key { + default: + s.UnknownTags.Set(key, b.Span(int(b.Uvarint()))) + case 0: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + v := &s.CurrentLeader + v.Default() + s := v + { + v := b.Int32() + s.LeaderID = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + if err := b.Complete(); err != nil { + return err + } + } + } + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrFetchSnapshotResponse returns a pointer to a default FetchSnapshotResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrFetchSnapshotResponse() *FetchSnapshotResponse { + var v FetchSnapshotResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchSnapshotResponse. +func (v *FetchSnapshotResponse) Default() { +} + +// NewFetchSnapshotResponse returns a default FetchSnapshotResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchSnapshotResponse() FetchSnapshotResponse { + var v FetchSnapshotResponse + v.Default() + return v +} + +// Introduced for KIP-700, DescribeClusterRequest is effectively an "admin" +// type metadata request for information that producers or consumers do not +// need to care about. +type DescribeClusterRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Whether to include cluster authorized operations. This requires DESCRIBE + // on CLUSTER. + IncludeClusterAuthorizedOperations bool + + // The endpoint type to describe. 1=brokers, 2=controllers. 
+ // + // This field has a default of 1. + EndpointType int8 // v1+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*DescribeClusterRequest) Key() int16 { return 60 } +func (*DescribeClusterRequest) MaxVersion() int16 { return 1 } +func (v *DescribeClusterRequest) SetVersion(version int16) { v.Version = version } +func (v *DescribeClusterRequest) GetVersion() int16 { return v.Version } +func (v *DescribeClusterRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *DescribeClusterRequest) ResponseKind() Response { + r := &DescribeClusterResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *DescribeClusterRequest) RequestWith(ctx context.Context, r Requestor) (*DescribeClusterResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DescribeClusterResponse) + return resp, err +} + +func (v *DescribeClusterRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.IncludeClusterAuthorizedOperations + dst = kbin.AppendBool(dst, v) + } + if version >= 1 { + v := v.EndpointType + dst = kbin.AppendInt8(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeClusterRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeClusterRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeClusterRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Bool() + s.IncludeClusterAuthorizedOperations = v + } + if version >= 1 { + v := b.Int8() + s.EndpointType = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeClusterRequest returns a pointer to a default DescribeClusterRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeClusterRequest() *DescribeClusterRequest { + var v DescribeClusterRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeClusterRequest. +func (v *DescribeClusterRequest) Default() { + v.EndpointType = 1 +} + +// NewDescribeClusterRequest returns a default DescribeClusterRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeClusterRequest() DescribeClusterRequest { + var v DescribeClusterRequest + v.Default() + return v +} + +type DescribeClusterResponseBroker struct { + // NodeID is the node ID of a Kafka broker. + NodeID int32 + + // Host is the hostname of a Kafka broker. + Host string + + // Port is the port of a Kafka broker. + Port int32 + + // Rack is the rack this Kafka broker is in, if any. + Rack *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeClusterResponseBroker. 
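To show how the KIP-700 DescribeCluster definitions above are typically driven end to end, a hedged sketch follows. It is not part of this change; the import path and the Requestor value are assumptions, and the printed fields are only those defined in the generated response.

package example

import (
	"context"
	"fmt"

	"github.com/twmb/franz-go/pkg/kmsg"
)

// listBrokers sends a DescribeClusterRequest through the generated
// RequestWith helper and prints the brokers from the response, where r is
// any implementation of the package's Requestor interface. Sketch only.
func listBrokers(ctx context.Context, r kmsg.Requestor) error {
	req := kmsg.NewPtrDescribeClusterRequest() // EndpointType defaults to 1 (brokers)
	req.SetVersion(1)

	resp, err := req.RequestWith(ctx, r)
	if err != nil {
		return err
	}
	if resp.ErrorCode != 0 {
		return fmt.Errorf("describe cluster failed: error code %d", resp.ErrorCode)
	}
	fmt.Printf("cluster %s, controller %d\n", resp.ClusterID, resp.ControllerID)
	for _, b := range resp.Brokers {
		fmt.Printf("broker %d at %s:%d\n", b.NodeID, b.Host, b.Port)
	}
	return nil
}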
+func (v *DescribeClusterResponseBroker) Default() { +} + +// NewDescribeClusterResponseBroker returns a default DescribeClusterResponseBroker +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeClusterResponseBroker() DescribeClusterResponseBroker { + var v DescribeClusterResponseBroker + v.Default() + return v +} + +// DescribeClusterResponse is a response to a DescribeClusterRequest. +type DescribeClusterResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // The top level response error code. + ErrorCode int16 + + // The top level error message, if any. + ErrorMessage *string + + // The endpoint type that was described. 1=brokers, 2=controllers. + // + // This field has a default of 1. + EndpointType int8 // v1+ + + // The cluster ID that responding broker belongs to. + ClusterID string + + // The ID of the controller broker. + // + // This field has a default of -1. + ControllerID int32 + + // Brokers is a set of alive Kafka brokers (this mirrors MetadataResponse.Brokers). + Brokers []DescribeClusterResponseBroker + + // 32-bit bitfield to represent authorized operations for this cluster. + // + // This field has a default of -2147483648. + ClusterAuthorizedOperations int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*DescribeClusterResponse) Key() int16 { return 60 } +func (*DescribeClusterResponse) MaxVersion() int16 { return 1 } +func (v *DescribeClusterResponse) SetVersion(version int16) { v.Version = version } +func (v *DescribeClusterResponse) GetVersion() int16 { return v.Version } +func (v *DescribeClusterResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *DescribeClusterResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 0 } +func (v *DescribeClusterResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *DescribeClusterResponse) RequestKind() Request { + return &DescribeClusterRequest{Version: v.Version} +} + +func (v *DescribeClusterResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 1 { + v := v.EndpointType + dst = kbin.AppendInt8(dst, v) + } + { + v := v.ClusterID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ControllerID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Brokers + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.NodeID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Host + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Port + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Rack + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 
0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.ClusterAuthorizedOperations + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeClusterResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeClusterResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeClusterResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + if version >= 1 { + v := b.Int8() + s.EndpointType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ClusterID = v + } + { + v := b.Int32() + s.ControllerID = v + } + { + v := s.Brokers + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeClusterResponseBroker, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.NodeID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Host = v + } + { + v := b.Int32() + s.Port = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Rack = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Brokers = v + } + { + v := b.Int32() + s.ClusterAuthorizedOperations = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeClusterResponse returns a pointer to a default DescribeClusterResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeClusterResponse() *DescribeClusterResponse { + var v DescribeClusterResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeClusterResponse. +func (v *DescribeClusterResponse) Default() { + v.EndpointType = 1 + v.ControllerID = -1 + v.ClusterAuthorizedOperations = -2147483648 +} + +// NewDescribeClusterResponse returns a default DescribeClusterResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeClusterResponse() DescribeClusterResponse { + var v DescribeClusterResponse + v.Default() + return v +} + +type DescribeProducersRequestTopic struct { + Topic string + + // The partitions to list producers for for the given topic. 
+ Partitions []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeProducersRequestTopic. +func (v *DescribeProducersRequestTopic) Default() { +} + +// NewDescribeProducersRequestTopic returns a default DescribeProducersRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeProducersRequestTopic() DescribeProducersRequestTopic { + var v DescribeProducersRequestTopic + v.Default() + return v +} + +// Introduced for KIP-664, DescribeProducersRequest allows for introspecting +// the state of the transaction coordinator. This request can be used to detect +// hanging transactions or other EOS-related problems. +// +// This request allows for describing the state of the active +// idempotent/transactional producers. +type DescribeProducersRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The topics to describe producers for. + Topics []DescribeProducersRequestTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*DescribeProducersRequest) Key() int16 { return 61 } +func (*DescribeProducersRequest) MaxVersion() int16 { return 0 } +func (v *DescribeProducersRequest) SetVersion(version int16) { v.Version = version } +func (v *DescribeProducersRequest) GetVersion() int16 { return v.Version } +func (v *DescribeProducersRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *DescribeProducersRequest) ResponseKind() Response { + r := &DescribeProducersResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
+func (v *DescribeProducersRequest) RequestWith(ctx context.Context, r Requestor) (*DescribeProducersResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DescribeProducersResponse) + return resp, err +} + +func (v *DescribeProducersRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeProducersRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeProducersRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeProducersRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeProducersRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeProducersRequest returns a pointer to a default DescribeProducersRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeProducersRequest() *DescribeProducersRequest { + var v DescribeProducersRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeProducersRequest. +func (v *DescribeProducersRequest) Default() { +} + +// NewDescribeProducersRequest returns a default DescribeProducersRequest +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewDescribeProducersRequest() DescribeProducersRequest { + var v DescribeProducersRequest + v.Default() + return v +} + +type DescribeProducersResponseTopicPartitionActiveProducer struct { + ProducerID int64 + + ProducerEpoch int32 + + // The last sequence produced. + // + // This field has a default of -1. + LastSequence int32 + + // The last timestamp produced. + // + // This field has a default of -1. + LastTimestamp int64 + + // The epoch of the transactional coordinator for this last produce. + CoordinatorEpoch int32 + + // The first offset of the transaction. + // + // This field has a default of -1. + CurrentTxnStartOffset int64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeProducersResponseTopicPartitionActiveProducer. +func (v *DescribeProducersResponseTopicPartitionActiveProducer) Default() { + v.LastSequence = -1 + v.LastTimestamp = -1 + v.CurrentTxnStartOffset = -1 +} + +// NewDescribeProducersResponseTopicPartitionActiveProducer returns a default DescribeProducersResponseTopicPartitionActiveProducer +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeProducersResponseTopicPartitionActiveProducer() DescribeProducersResponseTopicPartitionActiveProducer { + var v DescribeProducersResponseTopicPartitionActiveProducer + v.Default() + return v +} + +type DescribeProducersResponseTopicPartition struct { + Partition int32 + + // The partition error code, or 0 if there was no error. + // + // NOT_LEADER_OR_FOLLOWER is returned if the broker receiving this request + // is not the leader of the partition. + // + // TOPIC_AUTHORIZATION_FAILED is returned if the user does not have Describe + // permissions on the topic. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the partition is not known to exist. + // + // Other errors may be returned corresponding to the partition being offline, etc. + ErrorCode int16 + + // The partition error message, which may be null if no additional details are available. + ErrorMessage *string + + // The current idempotent or transactional producers producing to this partition, + // and the metadata related to their produce requests. + ActiveProducers []DescribeProducersResponseTopicPartitionActiveProducer + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeProducersResponseTopicPartition. +func (v *DescribeProducersResponseTopicPartition) Default() { +} + +// NewDescribeProducersResponseTopicPartition returns a default DescribeProducersResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeProducersResponseTopicPartition() DescribeProducersResponseTopicPartition { + var v DescribeProducersResponseTopicPartition + v.Default() + return v +} + +type DescribeProducersResponseTopic struct { + Topic string + + Partitions []DescribeProducersResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeProducersResponseTopic. 
+func (v *DescribeProducersResponseTopic) Default() { +} + +// NewDescribeProducersResponseTopic returns a default DescribeProducersResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeProducersResponseTopic() DescribeProducersResponseTopic { + var v DescribeProducersResponseTopic + v.Default() + return v +} + +// DescribeProducersResponse is a response to a DescribeProducersRequest. +type DescribeProducersResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + Topics []DescribeProducersResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*DescribeProducersResponse) Key() int16 { return 61 } +func (*DescribeProducersResponse) MaxVersion() int16 { return 0 } +func (v *DescribeProducersResponse) SetVersion(version int16) { v.Version = version } +func (v *DescribeProducersResponse) GetVersion() int16 { return v.Version } +func (v *DescribeProducersResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *DescribeProducersResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 0 } +func (v *DescribeProducersResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *DescribeProducersResponse) RequestKind() Request { + return &DescribeProducersRequest{Version: v.Version} +} + +func (v *DescribeProducersResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.ActiveProducers + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ProducerEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LastSequence + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LastTimestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.CoordinatorEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.CurrentTxnStartOffset + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = 
kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeProducersResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeProducersResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeProducersResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeProducersResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeProducersResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + v := s.ActiveProducers + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeProducersResponseTopicPartitionActiveProducer, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int64() + s.ProducerID = v + } + { + v := b.Int32() + s.ProducerEpoch = v + } + { + v := b.Int32() + s.LastSequence = v + } + { + v := b.Int64() + s.LastTimestamp = v + } + { + v := b.Int32() + s.CoordinatorEpoch = v + } + { + v := b.Int64() + s.CurrentTxnStartOffset = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.ActiveProducers = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeProducersResponse returns a pointer to a default DescribeProducersResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeProducersResponse() *DescribeProducersResponse { + var v DescribeProducersResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeProducersResponse. +func (v *DescribeProducersResponse) Default() { +} + +// NewDescribeProducersResponse returns a default DescribeProducersResponse +// This is a shortcut for creating a struct and calling Default yourself. 
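As a standalone, hypothetical sketch of the KIP-664 DescribeProducers API defined above (the topic name and partition are illustrative, and this snippet is not part of the vendored file), building and encoding a request looks like:

package main

import (
	"fmt"

	"github.com/twmb/franz-go/pkg/kmsg"
)

func main() {
	// Ask which idempotent/transactional producers are currently active on
	// one partition of a topic.
	topic := kmsg.NewDescribeProducersRequestTopic()
	topic.Topic = "orders"
	topic.Partitions = []int32{0}

	req := kmsg.NewPtrDescribeProducersRequest()
	req.SetVersion(0)
	req.Topics = append(req.Topics, topic)

	body := req.AppendTo(nil)
	fmt.Printf("DescribeProducers: api key %d, version %d, %d body bytes\n",
		req.Key(), req.GetVersion(), len(body))
}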
+func NewDescribeProducersResponse() DescribeProducersResponse { + var v DescribeProducersResponse + v.Default() + return v +} + +type BrokerRegistrationRequestListener struct { + // The name of this endpoint. + Name string + + // The hostname. + Host string + + // The port. + Port uint16 + + // The security protocol. + SecurityProtocol int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to BrokerRegistrationRequestListener. +func (v *BrokerRegistrationRequestListener) Default() { +} + +// NewBrokerRegistrationRequestListener returns a default BrokerRegistrationRequestListener +// This is a shortcut for creating a struct and calling Default yourself. +func NewBrokerRegistrationRequestListener() BrokerRegistrationRequestListener { + var v BrokerRegistrationRequestListener + v.Default() + return v +} + +type BrokerRegistrationRequestFeature struct { + // The name of the feature. + Name string + + // The minimum supported feature level. + MinSupportedVersion int16 + + // The maximum supported feature level. + MaxSupportedVersion int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to BrokerRegistrationRequestFeature. +func (v *BrokerRegistrationRequestFeature) Default() { +} + +// NewBrokerRegistrationRequestFeature returns a default BrokerRegistrationRequestFeature +// This is a shortcut for creating a struct and calling Default yourself. +func NewBrokerRegistrationRequestFeature() BrokerRegistrationRequestFeature { + var v BrokerRegistrationRequestFeature + v.Default() + return v +} + +// For KIP-500 / KIP-631, BrokerRegistrationRequest is an internal +// broker-to-broker only request. +type BrokerRegistrationRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The broker ID. + BrokerID int32 + + // The cluster ID of the broker process. + ClusterID string + + // The incarnation ID of the broker process. + IncarnationID [16]byte + + // The listeners for this broker. + Listeners []BrokerRegistrationRequestListener + + // Features on this broker. + Features []BrokerRegistrationRequestFeature + + // The rack that this broker is in, if any. + Rack *string + + // If the required configurations for ZK migration are present, this value is + // set to true. + IsMigratingZkBroker bool // v1+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*BrokerRegistrationRequest) Key() int16 { return 62 } +func (*BrokerRegistrationRequest) MaxVersion() int16 { return 1 } +func (v *BrokerRegistrationRequest) SetVersion(version int16) { v.Version = version } +func (v *BrokerRegistrationRequest) GetVersion() int16 { return v.Version } +func (v *BrokerRegistrationRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *BrokerRegistrationRequest) ResponseKind() Response { + r := &BrokerRegistrationResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
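BrokerRegistration is an internal broker-to-controller message, so a client would not normally send it; purely to illustrate the generated encoder above, a hypothetical sketch with made-up values (not part of the vendored file):

package main

import (
	"fmt"

	"github.com/twmb/franz-go/pkg/kmsg"
)

func main() {
	// Brokers, not clients, register themselves with the KRaft controller;
	// this only exercises the generated encoder.
	listener := kmsg.NewBrokerRegistrationRequestListener()
	listener.Name = "PLAINTEXT"
	listener.Host = "broker-0.internal"
	listener.Port = 9092

	req := kmsg.NewPtrBrokerRegistrationRequest()
	req.SetVersion(1)
	req.BrokerID = 0
	req.ClusterID = "test-cluster"
	req.Listeners = append(req.Listeners, listener)

	fmt.Printf("BrokerRegistration: api key %d, %d body bytes\n",
		req.Key(), len(req.AppendTo(nil)))
}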
+func (v *BrokerRegistrationRequest) RequestWith(ctx context.Context, r Requestor) (*BrokerRegistrationResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*BrokerRegistrationResponse) + return resp, err +} + +func (v *BrokerRegistrationRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.BrokerID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ClusterID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.IncarnationID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.Listeners + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Host + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Port + dst = kbin.AppendUint16(dst, v) + } + { + v := v.SecurityProtocol + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.Features + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.MinSupportedVersion + dst = kbin.AppendInt16(dst, v) + } + { + v := v.MaxSupportedVersion + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.Rack + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 1 { + v := v.IsMigratingZkBroker + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *BrokerRegistrationRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *BrokerRegistrationRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *BrokerRegistrationRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.BrokerID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ClusterID = v + } + { + v := b.Uuid() + s.IncarnationID = v + } + { + v := s.Listeners + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]BrokerRegistrationRequestListener, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Host = v + } + { + v := b.Uint16() + s.Port = v + } + { + v := b.Int16() + s.SecurityProtocol = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Listeners = v + } + { + v := s.Features + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]BrokerRegistrationRequestFeature, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + { + v := b.Int16() + s.MinSupportedVersion = v + } + { + v := b.Int16() + s.MaxSupportedVersion = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Features = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Rack = v + } + if version >= 1 { + v := b.Bool() + s.IsMigratingZkBroker = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrBrokerRegistrationRequest returns a pointer to a default BrokerRegistrationRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrBrokerRegistrationRequest() *BrokerRegistrationRequest { + var v BrokerRegistrationRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to BrokerRegistrationRequest. +func (v *BrokerRegistrationRequest) Default() { +} + +// NewBrokerRegistrationRequest returns a default BrokerRegistrationRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewBrokerRegistrationRequest() BrokerRegistrationRequest { + var v BrokerRegistrationRequest + v.Default() + return v +} + +// BrokerRegistrationResponse is a response to a BrokerRegistrationRequest. +type BrokerRegistrationResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // Any error code, or 0. + ErrorCode int16 + + // The broker's assigned epoch, or -1 if none was assigned. + // + // This field has a default of -1. + BrokerEpoch int64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags +} + +func (*BrokerRegistrationResponse) Key() int16 { return 62 } +func (*BrokerRegistrationResponse) MaxVersion() int16 { return 1 } +func (v *BrokerRegistrationResponse) SetVersion(version int16) { v.Version = version } +func (v *BrokerRegistrationResponse) GetVersion() int16 { return v.Version } +func (v *BrokerRegistrationResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *BrokerRegistrationResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 0 +} + +func (v *BrokerRegistrationResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *BrokerRegistrationResponse) RequestKind() Request { + return &BrokerRegistrationRequest{Version: v.Version} +} + +func (v *BrokerRegistrationResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.BrokerEpoch + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *BrokerRegistrationResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *BrokerRegistrationResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *BrokerRegistrationResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int64() + s.BrokerEpoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrBrokerRegistrationResponse returns a pointer to a default BrokerRegistrationResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrBrokerRegistrationResponse() *BrokerRegistrationResponse { + var v BrokerRegistrationResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to BrokerRegistrationResponse. +func (v *BrokerRegistrationResponse) Default() { + v.BrokerEpoch = -1 +} + +// NewBrokerRegistrationResponse returns a default BrokerRegistrationResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewBrokerRegistrationResponse() BrokerRegistrationResponse { + var v BrokerRegistrationResponse + v.Default() + return v +} + +// For KIP-500 / KIP-631, BrokerHeartbeatRequest is an internal +// broker-to-broker only request. +type BrokerHeartbeatRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The broker ID. + BrokerID int32 + + // The broker's epoch. + // + // This field has a default of -1. + BrokerEpoch int64 + + // The highest metadata offset that the broker has reached. + CurrentMetadataOffset int64 + + // True if the broker wants to be fenced. + WantFence bool + + // True if the broker wants to be shutdown. + WantShutdown bool + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags +} + +func (*BrokerHeartbeatRequest) Key() int16 { return 63 } +func (*BrokerHeartbeatRequest) MaxVersion() int16 { return 0 } +func (v *BrokerHeartbeatRequest) SetVersion(version int16) { v.Version = version } +func (v *BrokerHeartbeatRequest) GetVersion() int16 { return v.Version } +func (v *BrokerHeartbeatRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *BrokerHeartbeatRequest) ResponseKind() Response { + r := &BrokerHeartbeatResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *BrokerHeartbeatRequest) RequestWith(ctx context.Context, r Requestor) (*BrokerHeartbeatResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*BrokerHeartbeatResponse) + return resp, err +} + +func (v *BrokerHeartbeatRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.BrokerID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.BrokerEpoch + dst = kbin.AppendInt64(dst, v) + } + { + v := v.CurrentMetadataOffset + dst = kbin.AppendInt64(dst, v) + } + { + v := v.WantFence + dst = kbin.AppendBool(dst, v) + } + { + v := v.WantShutdown + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *BrokerHeartbeatRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *BrokerHeartbeatRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *BrokerHeartbeatRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.BrokerID = v + } + { + v := b.Int64() + s.BrokerEpoch = v + } + { + v := b.Int64() + s.CurrentMetadataOffset = v + } + { + v := b.Bool() + s.WantFence = v + } + { + v := b.Bool() + s.WantShutdown = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrBrokerHeartbeatRequest returns a pointer to a default BrokerHeartbeatRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrBrokerHeartbeatRequest() *BrokerHeartbeatRequest { + var v BrokerHeartbeatRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to BrokerHeartbeatRequest. +func (v *BrokerHeartbeatRequest) Default() { + v.BrokerEpoch = -1 +} + +// NewBrokerHeartbeatRequest returns a default BrokerHeartbeatRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewBrokerHeartbeatRequest() BrokerHeartbeatRequest { + var v BrokerHeartbeatRequest + v.Default() + return v +} + +// BrokerHeartbeatResponse is a response to a BrokerHeartbeatRequest. +type BrokerHeartbeatResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // Any error code, or 0. + ErrorCode int16 + + // True if the broker has approximately caught up with the latest metadata. 
+ IsCaughtUp bool + + // True if the broker is fenced. + // + // This field has a default of true. + IsFenced bool + + // True if the broker should proceed with its shutdown. + ShouldShutdown bool + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*BrokerHeartbeatResponse) Key() int16 { return 63 } +func (*BrokerHeartbeatResponse) MaxVersion() int16 { return 0 } +func (v *BrokerHeartbeatResponse) SetVersion(version int16) { v.Version = version } +func (v *BrokerHeartbeatResponse) GetVersion() int16 { return v.Version } +func (v *BrokerHeartbeatResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *BrokerHeartbeatResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 0 } +func (v *BrokerHeartbeatResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *BrokerHeartbeatResponse) RequestKind() Request { + return &BrokerHeartbeatRequest{Version: v.Version} +} + +func (v *BrokerHeartbeatResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.IsCaughtUp + dst = kbin.AppendBool(dst, v) + } + { + v := v.IsFenced + dst = kbin.AppendBool(dst, v) + } + { + v := v.ShouldShutdown + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *BrokerHeartbeatResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *BrokerHeartbeatResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *BrokerHeartbeatResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Bool() + s.IsCaughtUp = v + } + { + v := b.Bool() + s.IsFenced = v + } + { + v := b.Bool() + s.ShouldShutdown = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrBrokerHeartbeatResponse returns a pointer to a default BrokerHeartbeatResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrBrokerHeartbeatResponse() *BrokerHeartbeatResponse { + var v BrokerHeartbeatResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to BrokerHeartbeatResponse. +func (v *BrokerHeartbeatResponse) Default() { + v.IsFenced = true +} + +// NewBrokerHeartbeatResponse returns a default BrokerHeartbeatResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewBrokerHeartbeatResponse() BrokerHeartbeatResponse { + var v BrokerHeartbeatResponse + v.Default() + return v +} + +// For KIP-500 / KIP-631, UnregisterBrokerRequest is an admin request to +// remove registration of a broker from the cluster. +type UnregisterBrokerRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The broker ID to unregister. + BrokerID int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags +} + +func (*UnregisterBrokerRequest) Key() int16 { return 64 } +func (*UnregisterBrokerRequest) MaxVersion() int16 { return 0 } +func (v *UnregisterBrokerRequest) SetVersion(version int16) { v.Version = version } +func (v *UnregisterBrokerRequest) GetVersion() int16 { return v.Version } +func (v *UnregisterBrokerRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *UnregisterBrokerRequest) ResponseKind() Response { + r := &UnregisterBrokerResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *UnregisterBrokerRequest) RequestWith(ctx context.Context, r Requestor) (*UnregisterBrokerResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*UnregisterBrokerResponse) + return resp, err +} + +func (v *UnregisterBrokerRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.BrokerID + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *UnregisterBrokerRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *UnregisterBrokerRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *UnregisterBrokerRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.BrokerID = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrUnregisterBrokerRequest returns a pointer to a default UnregisterBrokerRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrUnregisterBrokerRequest() *UnregisterBrokerRequest { + var v UnregisterBrokerRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to UnregisterBrokerRequest. +func (v *UnregisterBrokerRequest) Default() { +} + +// NewUnregisterBrokerRequest returns a default UnregisterBrokerRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewUnregisterBrokerRequest() UnregisterBrokerRequest { + var v UnregisterBrokerRequest + v.Default() + return v +} + +// UnregisterBrokerResponse is a response to a UnregisterBrokerRequest. +type UnregisterBrokerResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // Any error code, or 0. + ErrorCode int16 + + // The error message, if any. + ErrorMessage *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags +} + +func (*UnregisterBrokerResponse) Key() int16 { return 64 } +func (*UnregisterBrokerResponse) MaxVersion() int16 { return 0 } +func (v *UnregisterBrokerResponse) SetVersion(version int16) { v.Version = version } +func (v *UnregisterBrokerResponse) GetVersion() int16 { return v.Version } +func (v *UnregisterBrokerResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *UnregisterBrokerResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 0 } +func (v *UnregisterBrokerResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *UnregisterBrokerResponse) RequestKind() Request { + return &UnregisterBrokerRequest{Version: v.Version} +} + +func (v *UnregisterBrokerResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *UnregisterBrokerResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *UnregisterBrokerResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *UnregisterBrokerResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrUnregisterBrokerResponse returns a pointer to a default UnregisterBrokerResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrUnregisterBrokerResponse() *UnregisterBrokerResponse { + var v UnregisterBrokerResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to UnregisterBrokerResponse. +func (v *UnregisterBrokerResponse) Default() { +} + +// NewUnregisterBrokerResponse returns a default UnregisterBrokerResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewUnregisterBrokerResponse() UnregisterBrokerResponse { + var v UnregisterBrokerResponse + v.Default() + return v +} + +// For KIP-664, DescribeTransactionsRequest describes the state of transactions. +type DescribeTransactionsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Array of transactionalIds to include in describe results. If empty, then + // no results will be returned. + TransactionalIDs []string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags +} + +func (*DescribeTransactionsRequest) Key() int16 { return 65 } +func (*DescribeTransactionsRequest) MaxVersion() int16 { return 0 } +func (v *DescribeTransactionsRequest) SetVersion(version int16) { v.Version = version } +func (v *DescribeTransactionsRequest) GetVersion() int16 { return v.Version } +func (v *DescribeTransactionsRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *DescribeTransactionsRequest) ResponseKind() Response { + r := &DescribeTransactionsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *DescribeTransactionsRequest) RequestWith(ctx context.Context, r Requestor) (*DescribeTransactionsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DescribeTransactionsResponse) + return resp, err +} + +func (v *DescribeTransactionsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.TransactionalIDs + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeTransactionsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeTransactionsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeTransactionsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := s.TransactionalIDs + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]string, l)...) + } + for i := int32(0); i < l; i++ { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + a[i] = v + } + v = a + s.TransactionalIDs = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeTransactionsRequest returns a pointer to a default DescribeTransactionsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeTransactionsRequest() *DescribeTransactionsRequest { + var v DescribeTransactionsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeTransactionsRequest. +func (v *DescribeTransactionsRequest) Default() { +} + +// NewDescribeTransactionsRequest returns a default DescribeTransactionsRequest +// This is a shortcut for creating a struct and calling Default yourself. 
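A standalone, hypothetical sketch of encoding and decoding the KIP-664 DescribeTransactionsRequest defined above (the transactional IDs are illustrative; not part of the vendored file):

package main

import (
	"fmt"

	"github.com/twmb/franz-go/pkg/kmsg"
)

func main() {
	// Describe the state of two transactional IDs.
	req := kmsg.NewPtrDescribeTransactionsRequest()
	req.SetVersion(0)
	req.TransactionalIDs = []string{"payments-tx", "orders-tx"}

	body := req.AppendTo(nil)

	// Decoding the same bytes recovers the IDs; SetVersion must be called
	// before ReadFrom, just as when decoding an intercepted request body.
	decoded := kmsg.NewPtrDescribeTransactionsRequest()
	decoded.SetVersion(0)
	if err := decoded.ReadFrom(body); err != nil {
		panic(err)
	}
	fmt.Println(decoded.TransactionalIDs)
}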
+func NewDescribeTransactionsRequest() DescribeTransactionsRequest { + var v DescribeTransactionsRequest + v.Default() + return v +} + +type DescribeTransactionsResponseTransactionStateTopic struct { + Topic string + + Partitions []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeTransactionsResponseTransactionStateTopic. +func (v *DescribeTransactionsResponseTransactionStateTopic) Default() { +} + +// NewDescribeTransactionsResponseTransactionStateTopic returns a default DescribeTransactionsResponseTransactionStateTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeTransactionsResponseTransactionStateTopic() DescribeTransactionsResponseTransactionStateTopic { + var v DescribeTransactionsResponseTransactionStateTopic + v.Default() + return v +} + +type DescribeTransactionsResponseTransactionState struct { + // A potential error code for describing this transaction. + // + // NOT_COORDINATOR is returned if the broker receiving this transactional + // ID does not own the ID. + // + // COORDINATOR_LOAD_IN_PROGRESS is returned if the coordiantor is laoding. + // + // COORDINATOR_NOT_AVAILABLE is returned if the coordinator is being shutdown. + // + // TRANSACTIONAL_ID_NOT_FOUND is returned if the transactional ID could not be found. + // + // TRANSACTIONAL_ID_AUTHORIZATION_FAILED is returned if the user does not have + // Describe permissions on the transactional ID. + ErrorCode int16 + + // TransactionalID is the transactional ID this record is for. + TransactionalID string + + // State is the state the transaction is in. + State string + + // TimeoutMillis is the timeout of this transaction in milliseconds. + TimeoutMillis int32 + + // StartTimestamp is the timestamp in millis of when this transaction started. + StartTimestamp int64 + + // ProducerID is the ID in use by the transactional ID. + ProducerID int64 + + // ProducerEpoch is the epoch associated with the producer ID. + ProducerEpoch int16 + + // The set of partitions included in the current transaction (if active). + // When a transaction is preparing to commit or abort, this will include + // only partitions which do not have markers. + // + // This does not include topics the user is not authorized to describe. + Topics []DescribeTransactionsResponseTransactionStateTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeTransactionsResponseTransactionState. +func (v *DescribeTransactionsResponseTransactionState) Default() { +} + +// NewDescribeTransactionsResponseTransactionState returns a default DescribeTransactionsResponseTransactionState +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeTransactionsResponseTransactionState() DescribeTransactionsResponseTransactionState { + var v DescribeTransactionsResponseTransactionState + v.Default() + return v +} + +// DescribeTransactionsResponse is a response to a DescribeTransactionsRequest. +type DescribeTransactionsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. 
+ ThrottleMillis int32 + + TransactionStates []DescribeTransactionsResponseTransactionState + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*DescribeTransactionsResponse) Key() int16 { return 65 } +func (*DescribeTransactionsResponse) MaxVersion() int16 { return 0 } +func (v *DescribeTransactionsResponse) SetVersion(version int16) { v.Version = version } +func (v *DescribeTransactionsResponse) GetVersion() int16 { return v.Version } +func (v *DescribeTransactionsResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *DescribeTransactionsResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 0 +} + +func (v *DescribeTransactionsResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *DescribeTransactionsResponse) RequestKind() Request { + return &DescribeTransactionsRequest{Version: v.Version} +} + +func (v *DescribeTransactionsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.TransactionStates + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.TransactionalID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.State + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.TimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.StartTimestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ProducerEpoch + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeTransactionsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeTransactionsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeTransactionsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.TransactionStates + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, 
make([]DescribeTransactionsResponseTransactionState, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.TransactionalID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.State = v + } + { + v := b.Int32() + s.TimeoutMillis = v + } + { + v := b.Int64() + s.StartTimestamp = v + } + { + v := b.Int64() + s.ProducerID = v + } + { + v := b.Int16() + s.ProducerEpoch = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeTransactionsResponseTransactionStateTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.TransactionStates = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeTransactionsResponse returns a pointer to a default DescribeTransactionsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeTransactionsResponse() *DescribeTransactionsResponse { + var v DescribeTransactionsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeTransactionsResponse. +func (v *DescribeTransactionsResponse) Default() { +} + +// NewDescribeTransactionsResponse returns a default DescribeTransactionsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeTransactionsResponse() DescribeTransactionsResponse { + var v DescribeTransactionsResponse + v.Default() + return v +} + +// For KIP-664, ListTransactionsRequest lists transactions. +type ListTransactionsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The transaction states to filter by: if empty, all transactions are + // returned; if non-empty, then only transactions matching one of the + // filtered states will be returned. + // + // For a list of valid states, see the TransactionState enum. 
+ StateFilters []string + + // The producer IDs to filter by: if empty, all transactions will be + // returned; if non-empty, only transactions which match one of the filtered + // producer IDs will be returned + ProducerIDFilters []int64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*ListTransactionsRequest) Key() int16 { return 66 } +func (*ListTransactionsRequest) MaxVersion() int16 { return 0 } +func (v *ListTransactionsRequest) SetVersion(version int16) { v.Version = version } +func (v *ListTransactionsRequest) GetVersion() int16 { return v.Version } +func (v *ListTransactionsRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *ListTransactionsRequest) ResponseKind() Response { + r := &ListTransactionsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *ListTransactionsRequest) RequestWith(ctx context.Context, r Requestor) (*ListTransactionsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*ListTransactionsResponse) + return resp, err +} + +func (v *ListTransactionsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.StateFilters + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } + { + v := v.ProducerIDFilters + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt64(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ListTransactionsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ListTransactionsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ListTransactionsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := s.StateFilters + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]string, l)...) + } + for i := int32(0); i < l; i++ { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + a[i] = v + } + v = a + s.StateFilters = v + } + { + v := s.ProducerIDFilters + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int64, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := b.Int64() + a[i] = v + } + v = a + s.ProducerIDFilters = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrListTransactionsRequest returns a pointer to a default ListTransactionsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrListTransactionsRequest() *ListTransactionsRequest { + var v ListTransactionsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListTransactionsRequest. +func (v *ListTransactionsRequest) Default() { +} + +// NewListTransactionsRequest returns a default ListTransactionsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewListTransactionsRequest() ListTransactionsRequest { + var v ListTransactionsRequest + v.Default() + return v +} + +type ListTransactionsResponseTransactionState struct { + // The transactional ID being used. + TransactionalID string + + // The producer ID of the producer. + ProducerID int64 + + // The current transaction state of the producer. + TransactionState string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListTransactionsResponseTransactionState. +func (v *ListTransactionsResponseTransactionState) Default() { +} + +// NewListTransactionsResponseTransactionState returns a default ListTransactionsResponseTransactionState +// This is a shortcut for creating a struct and calling Default yourself. +func NewListTransactionsResponseTransactionState() ListTransactionsResponseTransactionState { + var v ListTransactionsResponseTransactionState + v.Default() + return v +} + +// ListTransactionsResponse is a response to a ListTransactionsRequest. +type ListTransactionsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // A potential error code for the listing, + // + // COORDINATOR_LOAD_IN_PROGRESS is returned if the coordinator is loading. + // + // COORDINATOR_NOT_AVAILABLE is returned if the coordinator receiving this + // request is shutting down. + ErrorCode int16 + + // Set of state filters provided in the request which were unknown to the + // transaction coordinator. + UnknownStateFilters []string + + // TransactionStates contains all transactions that were matched for listing + // in the request. The response elides transactions that the user does not have + // permission to describe (DESCRIBE on TRANSACTIONAL_ID for the transaction). + TransactionStates []ListTransactionsResponseTransactionState + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
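+	//
+	// Descriptive note (added comment, not upstream franz-go output): tags decoded
+	// into this field are re-emitted verbatim by AppendTo, so unknown tagged fields
+	// survive a decode/re-encode round trip.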
+ UnknownTags Tags +} + +func (*ListTransactionsResponse) Key() int16 { return 66 } +func (*ListTransactionsResponse) MaxVersion() int16 { return 0 } +func (v *ListTransactionsResponse) SetVersion(version int16) { v.Version = version } +func (v *ListTransactionsResponse) GetVersion() int16 { return v.Version } +func (v *ListTransactionsResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *ListTransactionsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 0 } +func (v *ListTransactionsResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *ListTransactionsResponse) RequestKind() Request { + return &ListTransactionsRequest{Version: v.Version} +} + +func (v *ListTransactionsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.UnknownStateFilters + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } + { + v := v.TransactionStates + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.TransactionalID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + { + v := v.TransactionState + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ListTransactionsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ListTransactionsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ListTransactionsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := s.UnknownStateFilters + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]string, l)...) + } + for i := int32(0); i < l; i++ { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + a[i] = v + } + v = a + s.UnknownStateFilters = v + } + { + v := s.TransactionStates + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ListTransactionsResponseTransactionState, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.TransactionalID = v + } + { + v := b.Int64() + s.ProducerID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.TransactionState = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.TransactionStates = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrListTransactionsResponse returns a pointer to a default ListTransactionsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrListTransactionsResponse() *ListTransactionsResponse { + var v ListTransactionsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListTransactionsResponse. +func (v *ListTransactionsResponse) Default() { +} + +// NewListTransactionsResponse returns a default ListTransactionsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewListTransactionsResponse() ListTransactionsResponse { + var v ListTransactionsResponse + v.Default() + return v +} + +// For KIP-730, AllocateProducerIDsRequest is a broker-to-broker request that +// requests a block of producer IDs from the controller broker. This is more +// specifically introduced for raft, but allows for one more request to avoid +// zookeeper in the non-raft world as well. +type AllocateProducerIDsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The ID of the requesting broker. + BrokerID int32 + + // The epoch of the requesting broker. + // + // This field has a default of -1. + BrokerEpoch int64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*AllocateProducerIDsRequest) Key() int16 { return 67 } +func (*AllocateProducerIDsRequest) MaxVersion() int16 { return 0 } +func (v *AllocateProducerIDsRequest) SetVersion(version int16) { v.Version = version } +func (v *AllocateProducerIDsRequest) GetVersion() int16 { return v.Version } +func (v *AllocateProducerIDsRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *AllocateProducerIDsRequest) ResponseKind() Response { + r := &AllocateProducerIDsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
+func (v *AllocateProducerIDsRequest) RequestWith(ctx context.Context, r Requestor) (*AllocateProducerIDsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*AllocateProducerIDsResponse) + return resp, err +} + +func (v *AllocateProducerIDsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.BrokerID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.BrokerEpoch + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AllocateProducerIDsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AllocateProducerIDsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AllocateProducerIDsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.BrokerID = v + } + { + v := b.Int64() + s.BrokerEpoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAllocateProducerIDsRequest returns a pointer to a default AllocateProducerIDsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAllocateProducerIDsRequest() *AllocateProducerIDsRequest { + var v AllocateProducerIDsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AllocateProducerIDsRequest. +func (v *AllocateProducerIDsRequest) Default() { + v.BrokerEpoch = -1 +} + +// NewAllocateProducerIDsRequest returns a default AllocateProducerIDsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewAllocateProducerIDsRequest() AllocateProducerIDsRequest { + var v AllocateProducerIDsRequest + v.Default() + return v +} + +// AllocateProducerIDsResponse is a response to an AllocateProducerIDsRequest. +type AllocateProducerIDsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // An error code, if any. + ErrorCode int16 + + // The first producer ID in this range, inclusive. + ProducerIDStart int64 + + // The number of producer IDs in this range. + ProducerIDLen int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags +} + +func (*AllocateProducerIDsResponse) Key() int16 { return 67 } +func (*AllocateProducerIDsResponse) MaxVersion() int16 { return 0 } +func (v *AllocateProducerIDsResponse) SetVersion(version int16) { v.Version = version } +func (v *AllocateProducerIDsResponse) GetVersion() int16 { return v.Version } +func (v *AllocateProducerIDsResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *AllocateProducerIDsResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 0 +} + +func (v *AllocateProducerIDsResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *AllocateProducerIDsResponse) RequestKind() Request { + return &AllocateProducerIDsRequest{Version: v.Version} +} + +func (v *AllocateProducerIDsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ProducerIDStart + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ProducerIDLen + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AllocateProducerIDsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AllocateProducerIDsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AllocateProducerIDsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int64() + s.ProducerIDStart = v + } + { + v := b.Int32() + s.ProducerIDLen = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAllocateProducerIDsResponse returns a pointer to a default AllocateProducerIDsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAllocateProducerIDsResponse() *AllocateProducerIDsResponse { + var v AllocateProducerIDsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AllocateProducerIDsResponse. +func (v *AllocateProducerIDsResponse) Default() { +} + +// NewAllocateProducerIDsResponse returns a default AllocateProducerIDsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewAllocateProducerIDsResponse() AllocateProducerIDsResponse { + var v AllocateProducerIDsResponse + v.Default() + return v +} + +type ConsumerGroupHeartbeatRequestTopic struct { + // The topic ID. + TopicID [16]byte + + // The partitions. + Partitions []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ConsumerGroupHeartbeatRequestTopic. +func (v *ConsumerGroupHeartbeatRequestTopic) Default() { +} + +// NewConsumerGroupHeartbeatRequestTopic returns a default ConsumerGroupHeartbeatRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewConsumerGroupHeartbeatRequestTopic() ConsumerGroupHeartbeatRequestTopic { + var v ConsumerGroupHeartbeatRequestTopic + v.Default() + return v +} + +// ConsumerGroupHeartbeat is a part of KIP-848; there are a lot of details +// to this request so documentation is left to the KIP itself. +type ConsumerGroupHeartbeatRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The group ID. + Group string + + // The member ID generated by the coordinator. This must be kept during + // the entire lifetime of the member. + MemberID string + + // The current member epoch; 0 to join the group, -1 to leave, -2 to + // indicate that the static member will rejoin. + MemberEpoch int32 + + // Instance ID of the member; null if not provided or if unchanging. + InstanceID *string + + // The rack ID of the member; null if not provided or if unchanging. + RackID *string + + // RebalanceTimeoutMillis is how long the coordinator will wait on a member + // to revoke its partitions. -1 if unchanging. + // + // This field has a default of -1. + RebalanceTimeoutMillis int32 + + // Subscribed topics; null if unchanging. + SubscribedTopicNames []string + + // The server side assignor to use; null if unchanging. + ServerAssignor *string + + // Topic partitions owned by the member; null if unchanging. + Topics []ConsumerGroupHeartbeatRequestTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*ConsumerGroupHeartbeatRequest) Key() int16 { return 68 } +func (*ConsumerGroupHeartbeatRequest) MaxVersion() int16 { return 0 } +func (v *ConsumerGroupHeartbeatRequest) SetVersion(version int16) { v.Version = version } +func (v *ConsumerGroupHeartbeatRequest) GetVersion() int16 { return v.Version } +func (v *ConsumerGroupHeartbeatRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *ConsumerGroupHeartbeatRequest) ResponseKind() Response { + r := &ConsumerGroupHeartbeatResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
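+//
+// Illustrative sketch (added comment, not upstream franz-go output; ctx and
+// client are hypothetical, and client is anything satisfying Requestor, e.g.
+// a *kgo.Client from the parent franz-go module):
+//
+//	req := NewPtrConsumerGroupHeartbeatRequest()
+//	resp, err := req.RequestWith(ctx, client)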
+func (v *ConsumerGroupHeartbeatRequest) RequestWith(ctx context.Context, r Requestor) (*ConsumerGroupHeartbeatResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*ConsumerGroupHeartbeatResponse) + return resp, err +} + +func (v *ConsumerGroupHeartbeatRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.MemberEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.InstanceID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.RackID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.RebalanceTimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.SubscribedTopicNames + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + for i := range v { + v := v[i] + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } + { + v := v.ServerAssignor + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + for i := range v { + v := &v[i] + { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ConsumerGroupHeartbeatRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ConsumerGroupHeartbeatRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ConsumerGroupHeartbeatRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Group = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.MemberID = v + } + { + v := b.Int32() + s.MemberEpoch = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.InstanceID = v + } + { 
+ var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.RackID = v + } + { + v := b.Int32() + s.RebalanceTimeoutMillis = v + } + { + v := s.SubscribedTopicNames + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 0 || l == 0 { + a = []string{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]string, l)...) + } + for i := int32(0); i < l; i++ { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + a[i] = v + } + v = a + s.SubscribedTopicNames = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ServerAssignor = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 0 || l == 0 { + a = []ConsumerGroupHeartbeatRequestTopic{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ConsumerGroupHeartbeatRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Uuid() + s.TopicID = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrConsumerGroupHeartbeatRequest returns a pointer to a default ConsumerGroupHeartbeatRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrConsumerGroupHeartbeatRequest() *ConsumerGroupHeartbeatRequest { + var v ConsumerGroupHeartbeatRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ConsumerGroupHeartbeatRequest. +func (v *ConsumerGroupHeartbeatRequest) Default() { + v.RebalanceTimeoutMillis = -1 +} + +// NewConsumerGroupHeartbeatRequest returns a default ConsumerGroupHeartbeatRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewConsumerGroupHeartbeatRequest() ConsumerGroupHeartbeatRequest { + var v ConsumerGroupHeartbeatRequest + v.Default() + return v +} + +type ConsumerGroupHeartbeatResponseAssignmentTopic struct { + TopicID [16]byte + + Partitions []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ConsumerGroupHeartbeatResponseAssignmentTopic. 
+func (v *ConsumerGroupHeartbeatResponseAssignmentTopic) Default() { +} + +// NewConsumerGroupHeartbeatResponseAssignmentTopic returns a default ConsumerGroupHeartbeatResponseAssignmentTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewConsumerGroupHeartbeatResponseAssignmentTopic() ConsumerGroupHeartbeatResponseAssignmentTopic { + var v ConsumerGroupHeartbeatResponseAssignmentTopic + v.Default() + return v +} + +type ConsumerGroupHeartbeatResponseAssignment struct { + // The topics partitions that can be used immediately. + Topics []ConsumerGroupHeartbeatResponseAssignmentTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ConsumerGroupHeartbeatResponseAssignment. +func (v *ConsumerGroupHeartbeatResponseAssignment) Default() { +} + +// NewConsumerGroupHeartbeatResponseAssignment returns a default ConsumerGroupHeartbeatResponseAssignment +// This is a shortcut for creating a struct and calling Default yourself. +func NewConsumerGroupHeartbeatResponseAssignment() ConsumerGroupHeartbeatResponseAssignment { + var v ConsumerGroupHeartbeatResponseAssignment + v.Default() + return v +} + +// ConsumerGroupHeartbeatResponse is returned from a ConsumerGroupHeartbeatRequest. +type ConsumerGroupHeartbeatResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // ErrorCode is the error for this response. + // + // Supported errors: + // - GROUP_AUTHORIZATION_FAILED (version 0+) + // - NOT_COORDINATOR (version 0+) + // - COORDINATOR_NOT_AVAILABLE (version 0+) + // - COORDINATOR_LOAD_IN_PROGRESS (version 0+) + // - INVALID_REQUEST (version 0+) + // - UNKNOWN_MEMBER_ID (version 0+) + // - FENCED_MEMBER_EPOCH (version 0+) + // - UNSUPPORTED_ASSIGNOR (version 0+) + // - UNRELEASED_INSTANCE_ID (version 0+) + // - GROUP_MAX_SIZE_REACHED (version 0+) + ErrorCode int16 + + // A supplementary message if this errored. + ErrorMessage *string + + // The member ID generated by the coordinator; provided when joining + // with MemberEpoch=0. + MemberID *string + + // The member epoch. + MemberEpoch int32 + + // The heartbeat interval, in milliseconds. + HeartbeatIntervalMillis int32 + + // The assignment; null if not provided. + Assignment *ConsumerGroupHeartbeatResponseAssignment + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags +} + +func (*ConsumerGroupHeartbeatResponse) Key() int16 { return 68 } +func (*ConsumerGroupHeartbeatResponse) MaxVersion() int16 { return 0 } +func (v *ConsumerGroupHeartbeatResponse) SetVersion(version int16) { v.Version = version } +func (v *ConsumerGroupHeartbeatResponse) GetVersion() int16 { return v.Version } +func (v *ConsumerGroupHeartbeatResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *ConsumerGroupHeartbeatResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 0 +} + +func (v *ConsumerGroupHeartbeatResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *ConsumerGroupHeartbeatResponse) RequestKind() Request { + return &ConsumerGroupHeartbeatRequest{Version: v.Version} +} + +func (v *ConsumerGroupHeartbeatResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.MemberEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.HeartbeatIntervalMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Assignment + if v == nil { + dst = append(dst, 255) + } else { + dst = append(dst, 1) + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ConsumerGroupHeartbeatResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ConsumerGroupHeartbeatResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ConsumerGroupHeartbeatResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.MemberID = v + } + { + v := b.Int32() + s.MemberEpoch = v 
+ } + { + v := b.Int32() + s.HeartbeatIntervalMillis = v + } + { + if present := b.Int8(); present != -1 && b.Ok() { + s.Assignment = new(ConsumerGroupHeartbeatResponseAssignment) + v := s.Assignment + v.Default() + s := v + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ConsumerGroupHeartbeatResponseAssignmentTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Uuid() + s.TopicID = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrConsumerGroupHeartbeatResponse returns a pointer to a default ConsumerGroupHeartbeatResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrConsumerGroupHeartbeatResponse() *ConsumerGroupHeartbeatResponse { + var v ConsumerGroupHeartbeatResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ConsumerGroupHeartbeatResponse. +func (v *ConsumerGroupHeartbeatResponse) Default() { + { + v := &v.Assignment + _ = v + } +} + +// NewConsumerGroupHeartbeatResponse returns a default ConsumerGroupHeartbeatResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewConsumerGroupHeartbeatResponse() ConsumerGroupHeartbeatResponse { + var v ConsumerGroupHeartbeatResponse + v.Default() + return v +} + +// RequestForKey returns the request corresponding to the given request key +// or nil if the key is unknown. 
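+//
+// Illustrative sketch (added comment, not upstream franz-go output; apiKey,
+// apiVersion, and body are hypothetical): once the request header has been
+// parsed, a body can be decoded generically:
+//
+//	req := RequestForKey(apiKey) // e.g. *ProduceRequest for key 0, nil if unknown
+//	req.SetVersion(apiVersion)
+//	if err := req.ReadFrom(body); err != nil { /* handle decode error */ }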
+func RequestForKey(key int16) Request { + switch key { + default: + return nil + case 0: + return NewPtrProduceRequest() + case 1: + return NewPtrFetchRequest() + case 2: + return NewPtrListOffsetsRequest() + case 3: + return NewPtrMetadataRequest() + case 4: + return NewPtrLeaderAndISRRequest() + case 5: + return NewPtrStopReplicaRequest() + case 6: + return NewPtrUpdateMetadataRequest() + case 7: + return NewPtrControlledShutdownRequest() + case 8: + return NewPtrOffsetCommitRequest() + case 9: + return NewPtrOffsetFetchRequest() + case 10: + return NewPtrFindCoordinatorRequest() + case 11: + return NewPtrJoinGroupRequest() + case 12: + return NewPtrHeartbeatRequest() + case 13: + return NewPtrLeaveGroupRequest() + case 14: + return NewPtrSyncGroupRequest() + case 15: + return NewPtrDescribeGroupsRequest() + case 16: + return NewPtrListGroupsRequest() + case 17: + return NewPtrSASLHandshakeRequest() + case 18: + return NewPtrApiVersionsRequest() + case 19: + return NewPtrCreateTopicsRequest() + case 20: + return NewPtrDeleteTopicsRequest() + case 21: + return NewPtrDeleteRecordsRequest() + case 22: + return NewPtrInitProducerIDRequest() + case 23: + return NewPtrOffsetForLeaderEpochRequest() + case 24: + return NewPtrAddPartitionsToTxnRequest() + case 25: + return NewPtrAddOffsetsToTxnRequest() + case 26: + return NewPtrEndTxnRequest() + case 27: + return NewPtrWriteTxnMarkersRequest() + case 28: + return NewPtrTxnOffsetCommitRequest() + case 29: + return NewPtrDescribeACLsRequest() + case 30: + return NewPtrCreateACLsRequest() + case 31: + return NewPtrDeleteACLsRequest() + case 32: + return NewPtrDescribeConfigsRequest() + case 33: + return NewPtrAlterConfigsRequest() + case 34: + return NewPtrAlterReplicaLogDirsRequest() + case 35: + return NewPtrDescribeLogDirsRequest() + case 36: + return NewPtrSASLAuthenticateRequest() + case 37: + return NewPtrCreatePartitionsRequest() + case 38: + return NewPtrCreateDelegationTokenRequest() + case 39: + return NewPtrRenewDelegationTokenRequest() + case 40: + return NewPtrExpireDelegationTokenRequest() + case 41: + return NewPtrDescribeDelegationTokenRequest() + case 42: + return NewPtrDeleteGroupsRequest() + case 43: + return NewPtrElectLeadersRequest() + case 44: + return NewPtrIncrementalAlterConfigsRequest() + case 45: + return NewPtrAlterPartitionAssignmentsRequest() + case 46: + return NewPtrListPartitionReassignmentsRequest() + case 47: + return NewPtrOffsetDeleteRequest() + case 48: + return NewPtrDescribeClientQuotasRequest() + case 49: + return NewPtrAlterClientQuotasRequest() + case 50: + return NewPtrDescribeUserSCRAMCredentialsRequest() + case 51: + return NewPtrAlterUserSCRAMCredentialsRequest() + case 52: + return NewPtrVoteRequest() + case 53: + return NewPtrBeginQuorumEpochRequest() + case 54: + return NewPtrEndQuorumEpochRequest() + case 55: + return NewPtrDescribeQuorumRequest() + case 56: + return NewPtrAlterPartitionRequest() + case 57: + return NewPtrUpdateFeaturesRequest() + case 58: + return NewPtrEnvelopeRequest() + case 59: + return NewPtrFetchSnapshotRequest() + case 60: + return NewPtrDescribeClusterRequest() + case 61: + return NewPtrDescribeProducersRequest() + case 62: + return NewPtrBrokerRegistrationRequest() + case 63: + return NewPtrBrokerHeartbeatRequest() + case 64: + return NewPtrUnregisterBrokerRequest() + case 65: + return NewPtrDescribeTransactionsRequest() + case 66: + return NewPtrListTransactionsRequest() + case 67: + return NewPtrAllocateProducerIDsRequest() + case 68: + return 
NewPtrConsumerGroupHeartbeatRequest() + } +} + +// ResponseForKey returns the response corresponding to the given request key +// or nil if the key is unknown. +func ResponseForKey(key int16) Response { + switch key { + default: + return nil + case 0: + return NewPtrProduceResponse() + case 1: + return NewPtrFetchResponse() + case 2: + return NewPtrListOffsetsResponse() + case 3: + return NewPtrMetadataResponse() + case 4: + return NewPtrLeaderAndISRResponse() + case 5: + return NewPtrStopReplicaResponse() + case 6: + return NewPtrUpdateMetadataResponse() + case 7: + return NewPtrControlledShutdownResponse() + case 8: + return NewPtrOffsetCommitResponse() + case 9: + return NewPtrOffsetFetchResponse() + case 10: + return NewPtrFindCoordinatorResponse() + case 11: + return NewPtrJoinGroupResponse() + case 12: + return NewPtrHeartbeatResponse() + case 13: + return NewPtrLeaveGroupResponse() + case 14: + return NewPtrSyncGroupResponse() + case 15: + return NewPtrDescribeGroupsResponse() + case 16: + return NewPtrListGroupsResponse() + case 17: + return NewPtrSASLHandshakeResponse() + case 18: + return NewPtrApiVersionsResponse() + case 19: + return NewPtrCreateTopicsResponse() + case 20: + return NewPtrDeleteTopicsResponse() + case 21: + return NewPtrDeleteRecordsResponse() + case 22: + return NewPtrInitProducerIDResponse() + case 23: + return NewPtrOffsetForLeaderEpochResponse() + case 24: + return NewPtrAddPartitionsToTxnResponse() + case 25: + return NewPtrAddOffsetsToTxnResponse() + case 26: + return NewPtrEndTxnResponse() + case 27: + return NewPtrWriteTxnMarkersResponse() + case 28: + return NewPtrTxnOffsetCommitResponse() + case 29: + return NewPtrDescribeACLsResponse() + case 30: + return NewPtrCreateACLsResponse() + case 31: + return NewPtrDeleteACLsResponse() + case 32: + return NewPtrDescribeConfigsResponse() + case 33: + return NewPtrAlterConfigsResponse() + case 34: + return NewPtrAlterReplicaLogDirsResponse() + case 35: + return NewPtrDescribeLogDirsResponse() + case 36: + return NewPtrSASLAuthenticateResponse() + case 37: + return NewPtrCreatePartitionsResponse() + case 38: + return NewPtrCreateDelegationTokenResponse() + case 39: + return NewPtrRenewDelegationTokenResponse() + case 40: + return NewPtrExpireDelegationTokenResponse() + case 41: + return NewPtrDescribeDelegationTokenResponse() + case 42: + return NewPtrDeleteGroupsResponse() + case 43: + return NewPtrElectLeadersResponse() + case 44: + return NewPtrIncrementalAlterConfigsResponse() + case 45: + return NewPtrAlterPartitionAssignmentsResponse() + case 46: + return NewPtrListPartitionReassignmentsResponse() + case 47: + return NewPtrOffsetDeleteResponse() + case 48: + return NewPtrDescribeClientQuotasResponse() + case 49: + return NewPtrAlterClientQuotasResponse() + case 50: + return NewPtrDescribeUserSCRAMCredentialsResponse() + case 51: + return NewPtrAlterUserSCRAMCredentialsResponse() + case 52: + return NewPtrVoteResponse() + case 53: + return NewPtrBeginQuorumEpochResponse() + case 54: + return NewPtrEndQuorumEpochResponse() + case 55: + return NewPtrDescribeQuorumResponse() + case 56: + return NewPtrAlterPartitionResponse() + case 57: + return NewPtrUpdateFeaturesResponse() + case 58: + return NewPtrEnvelopeResponse() + case 59: + return NewPtrFetchSnapshotResponse() + case 60: + return NewPtrDescribeClusterResponse() + case 61: + return NewPtrDescribeProducersResponse() + case 62: + return NewPtrBrokerRegistrationResponse() + case 63: + return NewPtrBrokerHeartbeatResponse() + case 64: + return 
NewPtrUnregisterBrokerResponse() + case 65: + return NewPtrDescribeTransactionsResponse() + case 66: + return NewPtrListTransactionsResponse() + case 67: + return NewPtrAllocateProducerIDsResponse() + case 68: + return NewPtrConsumerGroupHeartbeatResponse() + } +} + +// NameForKey returns the name (e.g., "Fetch") corresponding to a given request key +// or "" if the key is unknown. +func NameForKey(key int16) string { + switch key { + default: + return "Unknown" + case 0: + return "Produce" + case 1: + return "Fetch" + case 2: + return "ListOffsets" + case 3: + return "Metadata" + case 4: + return "LeaderAndISR" + case 5: + return "StopReplica" + case 6: + return "UpdateMetadata" + case 7: + return "ControlledShutdown" + case 8: + return "OffsetCommit" + case 9: + return "OffsetFetch" + case 10: + return "FindCoordinator" + case 11: + return "JoinGroup" + case 12: + return "Heartbeat" + case 13: + return "LeaveGroup" + case 14: + return "SyncGroup" + case 15: + return "DescribeGroups" + case 16: + return "ListGroups" + case 17: + return "SASLHandshake" + case 18: + return "ApiVersions" + case 19: + return "CreateTopics" + case 20: + return "DeleteTopics" + case 21: + return "DeleteRecords" + case 22: + return "InitProducerID" + case 23: + return "OffsetForLeaderEpoch" + case 24: + return "AddPartitionsToTxn" + case 25: + return "AddOffsetsToTxn" + case 26: + return "EndTxn" + case 27: + return "WriteTxnMarkers" + case 28: + return "TxnOffsetCommit" + case 29: + return "DescribeACLs" + case 30: + return "CreateACLs" + case 31: + return "DeleteACLs" + case 32: + return "DescribeConfigs" + case 33: + return "AlterConfigs" + case 34: + return "AlterReplicaLogDirs" + case 35: + return "DescribeLogDirs" + case 36: + return "SASLAuthenticate" + case 37: + return "CreatePartitions" + case 38: + return "CreateDelegationToken" + case 39: + return "RenewDelegationToken" + case 40: + return "ExpireDelegationToken" + case 41: + return "DescribeDelegationToken" + case 42: + return "DeleteGroups" + case 43: + return "ElectLeaders" + case 44: + return "IncrementalAlterConfigs" + case 45: + return "AlterPartitionAssignments" + case 46: + return "ListPartitionReassignments" + case 47: + return "OffsetDelete" + case 48: + return "DescribeClientQuotas" + case 49: + return "AlterClientQuotas" + case 50: + return "DescribeUserSCRAMCredentials" + case 51: + return "AlterUserSCRAMCredentials" + case 52: + return "Vote" + case 53: + return "BeginQuorumEpoch" + case 54: + return "EndQuorumEpoch" + case 55: + return "DescribeQuorum" + case 56: + return "AlterPartition" + case 57: + return "UpdateFeatures" + case 58: + return "Envelope" + case 59: + return "FetchSnapshot" + case 60: + return "DescribeCluster" + case 61: + return "DescribeProducers" + case 62: + return "BrokerRegistration" + case 63: + return "BrokerHeartbeat" + case 64: + return "UnregisterBroker" + case 65: + return "DescribeTransactions" + case 66: + return "ListTransactions" + case 67: + return "AllocateProducerIDs" + case 68: + return "ConsumerGroupHeartbeat" + } +} + +// Key is a typed representation of a request key, with helper functions. 
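+//
+// Descriptive note (added comment, not upstream franz-go output): the constants
+// below mirror NameForKey, RequestForKey, and ResponseForKey, so code can write
+// Fetch.Name() or Produce.Request() instead of raw int16 literals.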
+type Key int16 + +const ( + Produce Key = 0 + Fetch Key = 1 + ListOffsets Key = 2 + Metadata Key = 3 + LeaderAndISR Key = 4 + StopReplica Key = 5 + UpdateMetadata Key = 6 + ControlledShutdown Key = 7 + OffsetCommit Key = 8 + OffsetFetch Key = 9 + FindCoordinator Key = 10 + JoinGroup Key = 11 + Heartbeat Key = 12 + LeaveGroup Key = 13 + SyncGroup Key = 14 + DescribeGroups Key = 15 + ListGroups Key = 16 + SASLHandshake Key = 17 + ApiVersions Key = 18 + CreateTopics Key = 19 + DeleteTopics Key = 20 + DeleteRecords Key = 21 + InitProducerID Key = 22 + OffsetForLeaderEpoch Key = 23 + AddPartitionsToTxn Key = 24 + AddOffsetsToTxn Key = 25 + EndTxn Key = 26 + WriteTxnMarkers Key = 27 + TxnOffsetCommit Key = 28 + DescribeACLs Key = 29 + CreateACLs Key = 30 + DeleteACLs Key = 31 + DescribeConfigs Key = 32 + AlterConfigs Key = 33 + AlterReplicaLogDirs Key = 34 + DescribeLogDirs Key = 35 + SASLAuthenticate Key = 36 + CreatePartitions Key = 37 + CreateDelegationToken Key = 38 + RenewDelegationToken Key = 39 + ExpireDelegationToken Key = 40 + DescribeDelegationToken Key = 41 + DeleteGroups Key = 42 + ElectLeaders Key = 43 + IncrementalAlterConfigs Key = 44 + AlterPartitionAssignments Key = 45 + ListPartitionReassignments Key = 46 + OffsetDelete Key = 47 + DescribeClientQuotas Key = 48 + AlterClientQuotas Key = 49 + DescribeUserSCRAMCredentials Key = 50 + AlterUserSCRAMCredentials Key = 51 + Vote Key = 52 + BeginQuorumEpoch Key = 53 + EndQuorumEpoch Key = 54 + DescribeQuorum Key = 55 + AlterPartition Key = 56 + UpdateFeatures Key = 57 + Envelope Key = 58 + FetchSnapshot Key = 59 + DescribeCluster Key = 60 + DescribeProducers Key = 61 + BrokerRegistration Key = 62 + BrokerHeartbeat Key = 63 + UnregisterBroker Key = 64 + DescribeTransactions Key = 65 + ListTransactions Key = 66 + AllocateProducerIDs Key = 67 + ConsumerGroupHeartbeat Key = 68 +) + +// Name returns the name for this key. +func (k Key) Name() string { return NameForKey(int16(k)) } + +// Request returns a new request for this key if the key is known. +func (k Key) Request() Request { return RequestForKey(int16(k)) } + +// Response returns a new response for this key if the key is known. +func (k Key) Response() Response { return ResponseForKey(int16(k)) } + +// Int16 is an alias for int16(k). +func (k Key) Int16() int16 { return int16(k) } + +// A type of config. +// +// Possible values and their meanings: +// +// * 2 (TOPIC) +// +// * 4 (BROKER) +// +// * 8 (BROKER_LOGGER) +type ConfigResourceType int8 + +func (v ConfigResourceType) String() string { + switch v { + default: + return "UNKNOWN" + case 2: + return "TOPIC" + case 4: + return "BROKER" + case 8: + return "BROKER_LOGGER" + } +} + +func ConfigResourceTypeStrings() []string { + return []string{ + "TOPIC", + "BROKER", + "BROKER_LOGGER", + } +} + +// ParseConfigResourceType normalizes the input s and returns +// the value represented by the string. +// +// Normalizing works by stripping all dots, underscores, and dashes, +// trimming spaces, and lowercasing. 
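+//
+// For example, "BROKER_LOGGER", "broker.logger", and " Broker-Logger " all
+// normalize to "brokerlogger" and parse to ConfigResourceTypeBrokerLogger.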
+func ParseConfigResourceType(s string) (ConfigResourceType, error) { + switch strnorm(s) { + case "topic": + return 2, nil + case "broker": + return 4, nil + case "brokerlogger": + return 8, nil + default: + return 0, fmt.Errorf("ConfigResourceType: unable to parse %q", s) + } +} + +const ( + ConfigResourceTypeUnknown ConfigResourceType = 0 + ConfigResourceTypeTopic ConfigResourceType = 2 + ConfigResourceTypeBroker ConfigResourceType = 4 + ConfigResourceTypeBrokerLogger ConfigResourceType = 8 +) + +// MarshalText implements encoding.TextMarshaler. +func (e ConfigResourceType) MarshalText() (text []byte, err error) { + return []byte(e.String()), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (e *ConfigResourceType) UnmarshalText(text []byte) error { + v, err := ParseConfigResourceType(string(text)) + *e = v + return err +} + +// Where a config entry is from. If there are no config synonyms, +// the source is DEFAULT_CONFIG. +// +// Possible values and their meanings: +// +// * 1 (DYNAMIC_TOPIC_CONFIG) +// Dynamic topic config for a specific topic. +// +// * 2 (DYNAMIC_BROKER_CONFIG) +// Dynamic broker config for a specific broker. +// +// * 3 (DYNAMIC_DEFAULT_BROKER_CONFIG) +// Dynamic broker config used as the default for all brokers in a cluster. +// +// * 4 (STATIC_BROKER_CONFIG) +// Static broker config provided at start up. +// +// * 5 (DEFAULT_CONFIG) +// Built-in default configuration for those that have defaults. +// +// * 6 (DYNAMIC_BROKER_LOGGER_CONFIG) +// Broker logger; see KIP-412. +type ConfigSource int8 + +func (v ConfigSource) String() string { + switch v { + default: + return "UNKNOWN" + case 1: + return "DYNAMIC_TOPIC_CONFIG" + case 2: + return "DYNAMIC_BROKER_CONFIG" + case 3: + return "DYNAMIC_DEFAULT_BROKER_CONFIG" + case 4: + return "STATIC_BROKER_CONFIG" + case 5: + return "DEFAULT_CONFIG" + case 6: + return "DYNAMIC_BROKER_LOGGER_CONFIG" + } +} + +func ConfigSourceStrings() []string { + return []string{ + "DYNAMIC_TOPIC_CONFIG", + "DYNAMIC_BROKER_CONFIG", + "DYNAMIC_DEFAULT_BROKER_CONFIG", + "STATIC_BROKER_CONFIG", + "DEFAULT_CONFIG", + "DYNAMIC_BROKER_LOGGER_CONFIG", + } +} + +// ParseConfigSource normalizes the input s and returns +// the value represented by the string. +// +// Normalizing works by stripping all dots, underscores, and dashes, +// trimming spaces, and lowercasing. +func ParseConfigSource(s string) (ConfigSource, error) { + switch strnorm(s) { + case "dynamictopicconfig": + return 1, nil + case "dynamicbrokerconfig": + return 2, nil + case "dynamicdefaultbrokerconfig": + return 3, nil + case "staticbrokerconfig": + return 4, nil + case "defaultconfig": + return 5, nil + case "dynamicbrokerloggerconfig": + return 6, nil + default: + return 0, fmt.Errorf("ConfigSource: unable to parse %q", s) + } +} + +const ( + ConfigSourceUnknown ConfigSource = 0 + ConfigSourceDynamicTopicConfig ConfigSource = 1 + ConfigSourceDynamicBrokerConfig ConfigSource = 2 + ConfigSourceDynamicDefaultBrokerConfig ConfigSource = 3 + ConfigSourceStaticBrokerConfig ConfigSource = 4 + ConfigSourceDefaultConfig ConfigSource = 5 + ConfigSourceDynamicBrokerLoggerConfig ConfigSource = 6 +) + +// MarshalText implements encoding.TextMarshaler. +func (e ConfigSource) MarshalText() (text []byte, err error) { + return []byte(e.String()), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (e *ConfigSource) UnmarshalText(text []byte) error { + v, err := ParseConfigSource(string(text)) + *e = v + return err +} + +// A configuration data type. 
+// +// Possible values and their meanings: +// +// * 1 (BOOLEAN) +// +// * 2 (STRING) +// +// * 3 (INT) +// +// * 4 (SHORT) +// +// * 5 (LONG) +// +// * 6 (DOUBLE) +// +// * 7 (LIST) +// +// * 8 (CLASS) +// +// * 9 (PASSWORD) +type ConfigType int8 + +func (v ConfigType) String() string { + switch v { + default: + return "UNKNOWN" + case 1: + return "BOOLEAN" + case 2: + return "STRING" + case 3: + return "INT" + case 4: + return "SHORT" + case 5: + return "LONG" + case 6: + return "DOUBLE" + case 7: + return "LIST" + case 8: + return "CLASS" + case 9: + return "PASSWORD" + } +} + +func ConfigTypeStrings() []string { + return []string{ + "BOOLEAN", + "STRING", + "INT", + "SHORT", + "LONG", + "DOUBLE", + "LIST", + "CLASS", + "PASSWORD", + } +} + +// ParseConfigType normalizes the input s and returns +// the value represented by the string. +// +// Normalizing works by stripping all dots, underscores, and dashes, +// trimming spaces, and lowercasing. +func ParseConfigType(s string) (ConfigType, error) { + switch strnorm(s) { + case "boolean": + return 1, nil + case "string": + return 2, nil + case "int": + return 3, nil + case "short": + return 4, nil + case "long": + return 5, nil + case "double": + return 6, nil + case "list": + return 7, nil + case "class": + return 8, nil + case "password": + return 9, nil + default: + return 0, fmt.Errorf("ConfigType: unable to parse %q", s) + } +} + +const ( + ConfigTypeUnknown ConfigType = 0 + ConfigTypeBoolean ConfigType = 1 + ConfigTypeString ConfigType = 2 + ConfigTypeInt ConfigType = 3 + ConfigTypeShort ConfigType = 4 + ConfigTypeLong ConfigType = 5 + ConfigTypeDouble ConfigType = 6 + ConfigTypeList ConfigType = 7 + ConfigTypeClass ConfigType = 8 + ConfigTypePassword ConfigType = 9 +) + +// MarshalText implements encoding.TextMarshaler. +func (e ConfigType) MarshalText() (text []byte, err error) { + return []byte(e.String()), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (e *ConfigType) UnmarshalText(text []byte) error { + v, err := ParseConfigType(string(text)) + *e = v + return err +} + +// An incremental configuration operation. +// +// Possible values and their meanings: +// +// * 0 (SET) +// +// * 1 (DELETE) +// +// * 2 (APPEND) +// +// * 3 (SUBTRACT) +type IncrementalAlterConfigOp int8 + +func (v IncrementalAlterConfigOp) String() string { + switch v { + default: + return "UNKNOWN" + case 0: + return "SET" + case 1: + return "DELETE" + case 2: + return "APPEND" + case 3: + return "SUBTRACT" + } +} + +func IncrementalAlterConfigOpStrings() []string { + return []string{ + "SET", + "DELETE", + "APPEND", + "SUBTRACT", + } +} + +// ParseIncrementalAlterConfigOp normalizes the input s and returns +// the value represented by the string. +// +// Normalizing works by stripping all dots, underscores, and dashes, +// trimming spaces, and lowercasing. +func ParseIncrementalAlterConfigOp(s string) (IncrementalAlterConfigOp, error) { + switch strnorm(s) { + case "set": + return 0, nil + case "delete": + return 1, nil + case "append": + return 2, nil + case "subtract": + return 3, nil + default: + return 0, fmt.Errorf("IncrementalAlterConfigOp: unable to parse %q", s) + } +} + +const ( + IncrementalAlterConfigOpSet IncrementalAlterConfigOp = 0 + IncrementalAlterConfigOpDelete IncrementalAlterConfigOp = 1 + IncrementalAlterConfigOpAppend IncrementalAlterConfigOp = 2 + IncrementalAlterConfigOpSubtract IncrementalAlterConfigOp = 3 +) + +// MarshalText implements encoding.TextMarshaler. 
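+//
+// Descriptive note (added comment, not upstream franz-go output): because
+// MarshalText and UnmarshalText are implemented, encoders such as encoding/json
+// render this enum by its string name, e.g. "APPEND" for
+// IncrementalAlterConfigOpAppend.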
+func (e IncrementalAlterConfigOp) MarshalText() (text []byte, err error) { + return []byte(e.String()), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (e *IncrementalAlterConfigOp) UnmarshalText(text []byte) error { + v, err := ParseIncrementalAlterConfigOp(string(text)) + *e = v + return err +} + +// ACLResourceType is a type of resource to use for ACLs. +// +// Possible values and their meanings: +// +// * 1 (ANY) +// +// * 2 (TOPIC) +// +// * 3 (GROUP) +// +// * 4 (CLUSTER) +// +// * 5 (TRANSACTIONAL_ID) +// +// * 6 (DELEGATION_TOKEN) +// +// * 7 (USER) +type ACLResourceType int8 + +func (v ACLResourceType) String() string { + switch v { + default: + return "UNKNOWN" + case 1: + return "ANY" + case 2: + return "TOPIC" + case 3: + return "GROUP" + case 4: + return "CLUSTER" + case 5: + return "TRANSACTIONAL_ID" + case 6: + return "DELEGATION_TOKEN" + case 7: + return "USER" + } +} + +func ACLResourceTypeStrings() []string { + return []string{ + "ANY", + "TOPIC", + "GROUP", + "CLUSTER", + "TRANSACTIONAL_ID", + "DELEGATION_TOKEN", + "USER", + } +} + +// ParseACLResourceType normalizes the input s and returns +// the value represented by the string. +// +// Normalizing works by stripping all dots, underscores, and dashes, +// trimming spaces, and lowercasing. +func ParseACLResourceType(s string) (ACLResourceType, error) { + switch strnorm(s) { + case "any": + return 1, nil + case "topic": + return 2, nil + case "group": + return 3, nil + case "cluster": + return 4, nil + case "transactionalid": + return 5, nil + case "delegationtoken": + return 6, nil + case "user": + return 7, nil + default: + return 0, fmt.Errorf("ACLResourceType: unable to parse %q", s) + } +} + +const ( + ACLResourceTypeUnknown ACLResourceType = 0 + ACLResourceTypeAny ACLResourceType = 1 + ACLResourceTypeTopic ACLResourceType = 2 + ACLResourceTypeGroup ACLResourceType = 3 + ACLResourceTypeCluster ACLResourceType = 4 + ACLResourceTypeTransactionalId ACLResourceType = 5 + ACLResourceTypeDelegationToken ACLResourceType = 6 + ACLResourceTypeUser ACLResourceType = 7 +) + +// MarshalText implements encoding.TextMarshaler. +func (e ACLResourceType) MarshalText() (text []byte, err error) { + return []byte(e.String()), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (e *ACLResourceType) UnmarshalText(text []byte) error { + v, err := ParseACLResourceType(string(text)) + *e = v + return err +} + +// ACLResourcePatternType is how an acl's ResourceName is understood. +// +// This field was added with Kafka 2.0.0 for KIP-290. +// +// Possible values and their meanings: +// +// * 1 (ANY) +// Matches anything. +// +// * 2 (MATCH) +// Performs pattern matching; i.e., a literal match, or a prefix match, or wildcard. +// +// * 3 (LITERAL) +// The name must be an exact match. +// +// * 4 (PREFIXED) +// The name must have our requested name as a prefix (that is, "foo" will match on "foobar"). +type ACLResourcePatternType int8 + +func (v ACLResourcePatternType) String() string { + switch v { + default: + return "UNKNOWN" + case 1: + return "ANY" + case 2: + return "MATCH" + case 3: + return "LITERAL" + case 4: + return "PREFIXED" + } +} + +func ACLResourcePatternTypeStrings() []string { + return []string{ + "ANY", + "MATCH", + "LITERAL", + "PREFIXED", + } +} + +// ParseACLResourcePatternType normalizes the input s and returns +// the value represented by the string. +// +// Normalizing works by stripping all dots, underscores, and dashes, +// trimming spaces, and lowercasing. 
+func ParseACLResourcePatternType(s string) (ACLResourcePatternType, error) { + switch strnorm(s) { + case "any": + return 1, nil + case "match": + return 2, nil + case "literal": + return 3, nil + case "prefixed": + return 4, nil + default: + return 0, fmt.Errorf("ACLResourcePatternType: unable to parse %q", s) + } +} + +const ( + ACLResourcePatternTypeUnknown ACLResourcePatternType = 0 + ACLResourcePatternTypeAny ACLResourcePatternType = 1 + ACLResourcePatternTypeMatch ACLResourcePatternType = 2 + ACLResourcePatternTypeLiteral ACLResourcePatternType = 3 + ACLResourcePatternTypePrefixed ACLResourcePatternType = 4 +) + +// MarshalText implements encoding.TextMarshaler. +func (e ACLResourcePatternType) MarshalText() (text []byte, err error) { + return []byte(e.String()), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (e *ACLResourcePatternType) UnmarshalText(text []byte) error { + v, err := ParseACLResourcePatternType(string(text)) + *e = v + return err +} + +// An ACL permission type. +// +// Possible values and their meanings: +// +// * 1 (ANY) +// Any permission. +// +// * 2 (DENY) +// Any deny permission. +// +// * 3 (ALLOW) +// Any allow permission. +type ACLPermissionType int8 + +func (v ACLPermissionType) String() string { + switch v { + default: + return "UNKNOWN" + case 1: + return "ANY" + case 2: + return "DENY" + case 3: + return "ALLOW" + } +} + +func ACLPermissionTypeStrings() []string { + return []string{ + "ANY", + "DENY", + "ALLOW", + } +} + +// ParseACLPermissionType normalizes the input s and returns +// the value represented by the string. +// +// Normalizing works by stripping all dots, underscores, and dashes, +// trimming spaces, and lowercasing. +func ParseACLPermissionType(s string) (ACLPermissionType, error) { + switch strnorm(s) { + case "any": + return 1, nil + case "deny": + return 2, nil + case "allow": + return 3, nil + default: + return 0, fmt.Errorf("ACLPermissionType: unable to parse %q", s) + } +} + +const ( + ACLPermissionTypeUnknown ACLPermissionType = 0 + ACLPermissionTypeAny ACLPermissionType = 1 + ACLPermissionTypeDeny ACLPermissionType = 2 + ACLPermissionTypeAllow ACLPermissionType = 3 +) + +// MarshalText implements encoding.TextMarshaler. +func (e ACLPermissionType) MarshalText() (text []byte, err error) { + return []byte(e.String()), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (e *ACLPermissionType) UnmarshalText(text []byte) error { + v, err := ParseACLPermissionType(string(text)) + *e = v + return err +} + +// An ACL operation. +// +// Possible values and their meanings: +// +// * 1 (ANY) +// Matches anything. +// +// * 2 (ALL) +// Matches anything granted all permissions. 
+// +// * 3 (READ) +// +// * 4 (WRITE) +// +// * 5 (CREATE) +// +// * 6 (DELETE) +// +// * 7 (ALTER) +// +// * 8 (DESCRIBE) +// +// * 9 (CLUSTER_ACTION) +// +// * 10 (DESCRIBE_CONFIGS) +// +// * 11 (ALTER_CONFIGS) +// +// * 12 (IDEMPOTENT_WRITE) +// +// * 13 (CREATE_TOKENS) +// +// * 14 (DESCRIBE_TOKENS) +type ACLOperation int8 + +func (v ACLOperation) String() string { + switch v { + default: + return "UNKNOWN" + case 1: + return "ANY" + case 2: + return "ALL" + case 3: + return "READ" + case 4: + return "WRITE" + case 5: + return "CREATE" + case 6: + return "DELETE" + case 7: + return "ALTER" + case 8: + return "DESCRIBE" + case 9: + return "CLUSTER_ACTION" + case 10: + return "DESCRIBE_CONFIGS" + case 11: + return "ALTER_CONFIGS" + case 12: + return "IDEMPOTENT_WRITE" + case 13: + return "CREATE_TOKENS" + case 14: + return "DESCRIBE_TOKENS" + } +} + +func ACLOperationStrings() []string { + return []string{ + "ANY", + "ALL", + "READ", + "WRITE", + "CREATE", + "DELETE", + "ALTER", + "DESCRIBE", + "CLUSTER_ACTION", + "DESCRIBE_CONFIGS", + "ALTER_CONFIGS", + "IDEMPOTENT_WRITE", + "CREATE_TOKENS", + "DESCRIBE_TOKENS", + } +} + +// ParseACLOperation normalizes the input s and returns +// the value represented by the string. +// +// Normalizing works by stripping all dots, underscores, and dashes, +// trimming spaces, and lowercasing. +func ParseACLOperation(s string) (ACLOperation, error) { + switch strnorm(s) { + case "any": + return 1, nil + case "all": + return 2, nil + case "read": + return 3, nil + case "write": + return 4, nil + case "create": + return 5, nil + case "delete": + return 6, nil + case "alter": + return 7, nil + case "describe": + return 8, nil + case "clusteraction": + return 9, nil + case "describeconfigs": + return 10, nil + case "alterconfigs": + return 11, nil + case "idempotentwrite": + return 12, nil + case "createtokens": + return 13, nil + case "describetokens": + return 14, nil + default: + return 0, fmt.Errorf("ACLOperation: unable to parse %q", s) + } +} + +const ( + ACLOperationUnknown ACLOperation = 0 + ACLOperationAny ACLOperation = 1 + ACLOperationAll ACLOperation = 2 + ACLOperationRead ACLOperation = 3 + ACLOperationWrite ACLOperation = 4 + ACLOperationCreate ACLOperation = 5 + ACLOperationDelete ACLOperation = 6 + ACLOperationAlter ACLOperation = 7 + ACLOperationDescribe ACLOperation = 8 + ACLOperationClusterAction ACLOperation = 9 + ACLOperationDescribeConfigs ACLOperation = 10 + ACLOperationAlterConfigs ACLOperation = 11 + ACLOperationIdempotentWrite ACLOperation = 12 + ACLOperationCreateTokens ACLOperation = 13 + ACLOperationDescribeTokens ACLOperation = 14 +) + +// MarshalText implements encoding.TextMarshaler. +func (e ACLOperation) MarshalText() (text []byte, err error) { + return []byte(e.String()), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (e *ACLOperation) UnmarshalText(text []byte) error { + v, err := ParseACLOperation(string(text)) + *e = v + return err +} + +// TransactionState is the state of a transaction. 
+// +// Possible values and their meanings: +// +// * 0 (Empty) +// +// * 1 (Ongoing) +// +// * 2 (PrepareCommit) +// +// * 3 (PrepareAbort) +// +// * 4 (CompleteCommit) +// +// * 5 (CompleteAbort) +// +// * 6 (Dead) +// +// * 7 (PrepareEpochFence) +type TransactionState int8 + +func (v TransactionState) String() string { + switch v { + default: + return "Unknown" + case 0: + return "Empty" + case 1: + return "Ongoing" + case 2: + return "PrepareCommit" + case 3: + return "PrepareAbort" + case 4: + return "CompleteCommit" + case 5: + return "CompleteAbort" + case 6: + return "Dead" + case 7: + return "PrepareEpochFence" + } +} + +func TransactionStateStrings() []string { + return []string{ + "Empty", + "Ongoing", + "PrepareCommit", + "PrepareAbort", + "CompleteCommit", + "CompleteAbort", + "Dead", + "PrepareEpochFence", + } +} + +// ParseTransactionState normalizes the input s and returns +// the value represented by the string. +// +// Normalizing works by stripping all dots, underscores, and dashes, +// trimming spaces, and lowercasing. +func ParseTransactionState(s string) (TransactionState, error) { + switch strnorm(s) { + case "empty": + return 0, nil + case "ongoing": + return 1, nil + case "preparecommit": + return 2, nil + case "prepareabort": + return 3, nil + case "completecommit": + return 4, nil + case "completeabort": + return 5, nil + case "dead": + return 6, nil + case "prepareepochfence": + return 7, nil + default: + return 0, fmt.Errorf("TransactionState: unable to parse %q", s) + } +} + +const ( + TransactionStateEmpty TransactionState = 0 + TransactionStateOngoing TransactionState = 1 + TransactionStatePrepareCommit TransactionState = 2 + TransactionStatePrepareAbort TransactionState = 3 + TransactionStateCompleteCommit TransactionState = 4 + TransactionStateCompleteAbort TransactionState = 5 + TransactionStateDead TransactionState = 6 + TransactionStatePrepareEpochFence TransactionState = 7 +) + +// MarshalText implements encoding.TextMarshaler. +func (e TransactionState) MarshalText() (text []byte, err error) { + return []byte(e.String()), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (e *TransactionState) UnmarshalText(text []byte) error { + v, err := ParseTransactionState(string(text)) + *e = v + return err +} + +// QuotasMatchType specifies how to match a Quota entity as part of the DescribeClientQuotasRequestComponent. +// +// Possible values and their meanings: +// +// * 0 (EXACT) +// Matches all quotas for the given EntityType with names equal to the Match field. +// +// * 1 (DEFAULT) +// Matches the default for the given EntityType. +// +// * 2 (ANY) +// Matches all named quotas and default quotas for the given EntityType. +type QuotasMatchType int8 + +func (v QuotasMatchType) String() string { + switch v { + default: + return "UNKNOWN" + case 0: + return "EXACT" + case 1: + return "DEFAULT" + case 2: + return "ANY" + } +} + +func QuotasMatchTypeStrings() []string { + return []string{ + "EXACT", + "DEFAULT", + "ANY", + } +} + +// ParseQuotasMatchType normalizes the input s and returns +// the value represented by the string. +// +// Normalizing works by stripping all dots, underscores, and dashes, +// trimming spaces, and lowercasing. 
+func ParseQuotasMatchType(s string) (QuotasMatchType, error) { + switch strnorm(s) { + case "exact": + return 0, nil + case "default": + return 1, nil + case "any": + return 2, nil + default: + return 0, fmt.Errorf("QuotasMatchType: unable to parse %q", s) + } +} + +const ( + QuotasMatchTypeExact QuotasMatchType = 0 + QuotasMatchTypeDefault QuotasMatchType = 1 + QuotasMatchTypeAny QuotasMatchType = 2 +) + +// MarshalText implements encoding.TextMarshaler. +func (e QuotasMatchType) MarshalText() (text []byte, err error) { + return []byte(e.String()), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (e *QuotasMatchType) UnmarshalText(text []byte) error { + v, err := ParseQuotasMatchType(string(text)) + *e = v + return err +} + +// Possible values and their meanings: +// +// * 0 (ABORT) +// +// * 1 (COMMIT) +// +// * 2 (QUORUM_REASSIGNMENT) +// +// * 3 (LEADER_CHANGE) +type ControlRecordKeyType int8 + +func (v ControlRecordKeyType) String() string { + switch v { + default: + return "UNKNOWN" + case 0: + return "ABORT" + case 1: + return "COMMIT" + case 2: + return "QUORUM_REASSIGNMENT" + case 3: + return "LEADER_CHANGE" + } +} + +func ControlRecordKeyTypeStrings() []string { + return []string{ + "ABORT", + "COMMIT", + "QUORUM_REASSIGNMENT", + "LEADER_CHANGE", + } +} + +// ParseControlRecordKeyType normalizes the input s and returns +// the value represented by the string. +// +// Normalizing works by stripping all dots, underscores, and dashes, +// trimming spaces, and lowercasing. +func ParseControlRecordKeyType(s string) (ControlRecordKeyType, error) { + switch strnorm(s) { + case "abort": + return 0, nil + case "commit": + return 1, nil + case "quorumreassignment": + return 2, nil + case "leaderchange": + return 3, nil + default: + return 0, fmt.Errorf("ControlRecordKeyType: unable to parse %q", s) + } +} + +const ( + ControlRecordKeyTypeAbort ControlRecordKeyType = 0 + ControlRecordKeyTypeCommit ControlRecordKeyType = 1 + ControlRecordKeyTypeQuorumReassignment ControlRecordKeyType = 2 + ControlRecordKeyTypeLeaderChange ControlRecordKeyType = 3 +) + +// MarshalText implements encoding.TextMarshaler. +func (e ControlRecordKeyType) MarshalText() (text []byte, err error) { + return []byte(e.String()), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (e *ControlRecordKeyType) UnmarshalText(text []byte) error { + v, err := ParseControlRecordKeyType(string(text)) + *e = v + return err +} + +func strnorm(s string) string { + s = strings.ReplaceAll(s, ".", "") + s = strings.ReplaceAll(s, "_", "") + s = strings.ReplaceAll(s, "-", "") + s = strings.TrimSpace(s) + s = strings.ToLower(s) + return s +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kmsg/internal/kbin/primitives.go b/vendor/github.com/twmb/franz-go/pkg/kmsg/internal/kbin/primitives.go new file mode 100644 index 0000000..2c5990d --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kmsg/internal/kbin/primitives.go @@ -0,0 +1,850 @@ +// Package kbin contains Kafka primitive reading and writing functions. +package kbin + +import ( + "encoding/binary" + "errors" + "math" + "math/bits" + "reflect" + "unsafe" +) + +// This file contains primitive type encoding and decoding. +// +// The Reader helper can be used even when content runs out +// or an error is hit; all other number requests will return +// zero so a decode will basically no-op. + +// ErrNotEnoughData is returned when a type could not fully decode +// from a slice because the slice did not have enough data. 
+var ErrNotEnoughData = errors.New("response did not contain enough data to be valid") + +// AppendBool appends 1 for true or 0 for false to dst. +func AppendBool(dst []byte, v bool) []byte { + if v { + return append(dst, 1) + } + return append(dst, 0) +} + +// AppendInt8 appends an int8 to dst. +func AppendInt8(dst []byte, i int8) []byte { + return append(dst, byte(i)) +} + +// AppendInt16 appends a big endian int16 to dst. +func AppendInt16(dst []byte, i int16) []byte { + return AppendUint16(dst, uint16(i)) +} + +// AppendUint16 appends a big endian uint16 to dst. +func AppendUint16(dst []byte, u uint16) []byte { + return append(dst, byte(u>>8), byte(u)) +} + +// AppendInt32 appends a big endian int32 to dst. +func AppendInt32(dst []byte, i int32) []byte { + return AppendUint32(dst, uint32(i)) +} + +// AppendInt64 appends a big endian int64 to dst. +func AppendInt64(dst []byte, i int64) []byte { + return appendUint64(dst, uint64(i)) +} + +// AppendFloat64 appends a big endian float64 to dst. +func AppendFloat64(dst []byte, f float64) []byte { + return appendUint64(dst, math.Float64bits(f)) +} + +// AppendUuid appends the 16 uuid bytes to dst. +func AppendUuid(dst []byte, uuid [16]byte) []byte { + return append(dst, uuid[:]...) +} + +func appendUint64(dst []byte, u uint64) []byte { + return append(dst, byte(u>>56), byte(u>>48), byte(u>>40), byte(u>>32), + byte(u>>24), byte(u>>16), byte(u>>8), byte(u)) +} + +// AppendUint32 appends a big endian uint32 to dst. +func AppendUint32(dst []byte, u uint32) []byte { + return append(dst, byte(u>>24), byte(u>>16), byte(u>>8), byte(u)) +} + +// uvarintLens could only be length 65, but using 256 allows bounds check +// elimination on lookup. +const uvarintLens = "\x01\x01\x01\x01\x01\x01\x01\x01\x02\x02\x02\x02\x02\x02\x02\x03\x03\x03\x03\x03\x03\x03\x04\x04\x04\x04\x04\x04\x04\x05\x05\x05\x05\x05\x05\x05\x06\x06\x06\x06\x06\x06\x06\x07\x07\x07\x07\x07\x07\x07\x08\x08\x08\x08\x08\x08\x08\x09\x09\x09\x09\x09\x09\x09\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + +// VarintLen returns how long i would be if it were varint encoded. +func VarintLen(i int32) int { + u := uint32(i)<<1 ^ uint32(i>>31) + return UvarintLen(u) +} + +// UvarintLen returns how long u would be if it were uvarint encoded. +func UvarintLen(u uint32) int { + return int(uvarintLens[byte(bits.Len32(u))]) +} + +func uvarlongLen(u uint64) int { + return int(uvarintLens[byte(bits.Len64(u))]) +} + +// Varint is a loop unrolled 32 bit varint decoder. The return semantics +// are the same as binary.Varint, with the added benefit that overflows +// in 5 byte encodings are handled rather than left to the user. +func Varint(in []byte) (int32, int) { + x, n := Uvarint(in) + return int32((x >> 1) ^ -(x & 1)), n +} + +// Uvarint is a loop unrolled 32 bit uvarint decoder. 
The return semantics +// are the same as binary.Uvarint, with the added benefit that overflows +// in 5 byte encodings are handled rather than left to the user. +func Uvarint(in []byte) (uint32, int) { + var x uint32 + var overflow int + + if len(in) < 1 { + goto fail + } + + x = uint32(in[0] & 0x7f) + if in[0]&0x80 == 0 { + return x, 1 + } else if len(in) < 2 { + goto fail + } + + x |= uint32(in[1]&0x7f) << 7 + if in[1]&0x80 == 0 { + return x, 2 + } else if len(in) < 3 { + goto fail + } + + x |= uint32(in[2]&0x7f) << 14 + if in[2]&0x80 == 0 { + return x, 3 + } else if len(in) < 4 { + goto fail + } + + x |= uint32(in[3]&0x7f) << 21 + if in[3]&0x80 == 0 { + return x, 4 + } else if len(in) < 5 { + goto fail + } + + x |= uint32(in[4]) << 28 + if in[4] <= 0x0f { + return x, 5 + } + + overflow = -5 + +fail: + return 0, overflow +} + +// Varlong is a loop unrolled 64 bit varint decoder. The return semantics +// are the same as binary.Varint, with the added benefit that overflows +// in 10 byte encodings are handled rather than left to the user. +func Varlong(in []byte) (int64, int) { + x, n := uvarlong(in) + return int64((x >> 1) ^ -(x & 1)), n +} + +func uvarlong(in []byte) (uint64, int) { + var x uint64 + var overflow int + + if len(in) < 1 { + goto fail + } + + x = uint64(in[0] & 0x7f) + if in[0]&0x80 == 0 { + return x, 1 + } else if len(in) < 2 { + goto fail + } + + x |= uint64(in[1]&0x7f) << 7 + if in[1]&0x80 == 0 { + return x, 2 + } else if len(in) < 3 { + goto fail + } + + x |= uint64(in[2]&0x7f) << 14 + if in[2]&0x80 == 0 { + return x, 3 + } else if len(in) < 4 { + goto fail + } + + x |= uint64(in[3]&0x7f) << 21 + if in[3]&0x80 == 0 { + return x, 4 + } else if len(in) < 5 { + goto fail + } + + x |= uint64(in[4]&0x7f) << 28 + if in[4]&0x80 == 0 { + return x, 5 + } else if len(in) < 6 { + goto fail + } + + x |= uint64(in[5]&0x7f) << 35 + if in[5]&0x80 == 0 { + return x, 6 + } else if len(in) < 7 { + goto fail + } + + x |= uint64(in[6]&0x7f) << 42 + if in[6]&0x80 == 0 { + return x, 7 + } else if len(in) < 8 { + goto fail + } + + x |= uint64(in[7]&0x7f) << 49 + if in[7]&0x80 == 0 { + return x, 8 + } else if len(in) < 9 { + goto fail + } + + x |= uint64(in[8]&0x7f) << 56 + if in[8]&0x80 == 0 { + return x, 9 + } else if len(in) < 10 { + goto fail + } + + x |= uint64(in[9]) << 63 + if in[9] <= 0x01 { + return x, 10 + } + + overflow = -10 + +fail: + return 0, overflow +} + +// AppendVarint appends a varint encoded i to dst. +func AppendVarint(dst []byte, i int32) []byte { + return AppendUvarint(dst, uint32(i)<<1^uint32(i>>31)) +} + +// AppendUvarint appends a uvarint encoded u to dst. +func AppendUvarint(dst []byte, u uint32) []byte { + switch UvarintLen(u) { + case 5: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte((u>>14)&0x7f|0x80), + byte((u>>21)&0x7f|0x80), + byte(u>>28)) + case 4: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte((u>>14)&0x7f|0x80), + byte(u>>21)) + case 3: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte(u>>14)) + case 2: + return append(dst, + byte(u&0x7f|0x80), + byte(u>>7)) + case 1: + return append(dst, byte(u)) + } + return dst +} + +// AppendVarlong appends a varint encoded i to dst. 
+func AppendVarlong(dst []byte, i int64) []byte { + return appendUvarlong(dst, uint64(i)<<1^uint64(i>>63)) +} + +func appendUvarlong(dst []byte, u uint64) []byte { + switch uvarlongLen(u) { + case 10: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte((u>>14)&0x7f|0x80), + byte((u>>21)&0x7f|0x80), + byte((u>>28)&0x7f|0x80), + byte((u>>35)&0x7f|0x80), + byte((u>>42)&0x7f|0x80), + byte((u>>49)&0x7f|0x80), + byte((u>>56)&0x7f|0x80), + byte(u>>63)) + case 9: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte((u>>14)&0x7f|0x80), + byte((u>>21)&0x7f|0x80), + byte((u>>28)&0x7f|0x80), + byte((u>>35)&0x7f|0x80), + byte((u>>42)&0x7f|0x80), + byte((u>>49)&0x7f|0x80), + byte(u>>56)) + case 8: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte((u>>14)&0x7f|0x80), + byte((u>>21)&0x7f|0x80), + byte((u>>28)&0x7f|0x80), + byte((u>>35)&0x7f|0x80), + byte((u>>42)&0x7f|0x80), + byte(u>>49)) + case 7: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte((u>>14)&0x7f|0x80), + byte((u>>21)&0x7f|0x80), + byte((u>>28)&0x7f|0x80), + byte((u>>35)&0x7f|0x80), + byte(u>>42)) + case 6: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte((u>>14)&0x7f|0x80), + byte((u>>21)&0x7f|0x80), + byte((u>>28)&0x7f|0x80), + byte(u>>35)) + case 5: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte((u>>14)&0x7f|0x80), + byte((u>>21)&0x7f|0x80), + byte(u>>28)) + case 4: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte((u>>14)&0x7f|0x80), + byte(u>>21)) + case 3: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte(u>>14)) + case 2: + return append(dst, + byte(u&0x7f|0x80), + byte(u>>7)) + case 1: + return append(dst, byte(u)) + } + return dst +} + +// AppendString appends a string to dst prefixed with its int16 length. +func AppendString(dst []byte, s string) []byte { + dst = AppendInt16(dst, int16(len(s))) + return append(dst, s...) +} + +// AppendCompactString appends a string to dst prefixed with its uvarint length +// starting at 1; 0 is reserved for null, which compact strings are not +// (nullable compact ones are!). Thus, the length is the decoded uvarint - 1. +// +// For KIP-482. +func AppendCompactString(dst []byte, s string) []byte { + dst = AppendUvarint(dst, 1+uint32(len(s))) + return append(dst, s...) +} + +// AppendNullableString appends potentially nil string to dst prefixed with its +// int16 length or int16(-1) if nil. +func AppendNullableString(dst []byte, s *string) []byte { + if s == nil { + return AppendInt16(dst, -1) + } + return AppendString(dst, *s) +} + +// AppendCompactNullableString appends a potentially nil string to dst with its +// uvarint length starting at 1, with 0 indicating null. Thus, the length is +// the decoded uvarint - 1. +// +// For KIP-482. +func AppendCompactNullableString(dst []byte, s *string) []byte { + if s == nil { + return AppendUvarint(dst, 0) + } + return AppendCompactString(dst, *s) +} + +// AppendBytes appends bytes to dst prefixed with its int32 length. +func AppendBytes(dst, b []byte) []byte { + dst = AppendInt32(dst, int32(len(b))) + return append(dst, b...) +} + +// AppendCompactBytes appends bytes to dst prefixed with a its uvarint length +// starting at 1; 0 is reserved for null, which compact bytes are not (nullable +// compact ones are!). Thus, the length is the decoded uvarint - 1. +// +// For KIP-482. 
+func AppendCompactBytes(dst, b []byte) []byte { + dst = AppendUvarint(dst, 1+uint32(len(b))) + return append(dst, b...) +} + +// AppendNullableBytes appends a potentially nil slice to dst prefixed with its +// int32 length or int32(-1) if nil. +func AppendNullableBytes(dst, b []byte) []byte { + if b == nil { + return AppendInt32(dst, -1) + } + return AppendBytes(dst, b) +} + +// AppendCompactNullableBytes appends a potentially nil slice to dst with its +// uvarint length starting at 1, with 0 indicating null. Thus, the length is +// the decoded uvarint - 1. +// +// For KIP-482. +func AppendCompactNullableBytes(dst, b []byte) []byte { + if b == nil { + return AppendUvarint(dst, 0) + } + return AppendCompactBytes(dst, b) +} + +// AppendVarintString appends a string to dst prefixed with its length encoded +// as a varint. +func AppendVarintString(dst []byte, s string) []byte { + dst = AppendVarint(dst, int32(len(s))) + return append(dst, s...) +} + +// AppendVarintBytes appends a slice to dst prefixed with its length encoded as +// a varint. +func AppendVarintBytes(dst, b []byte) []byte { + if b == nil { + return AppendVarint(dst, -1) + } + dst = AppendVarint(dst, int32(len(b))) + return append(dst, b...) +} + +// AppendArrayLen appends the length of an array as an int32 to dst. +func AppendArrayLen(dst []byte, l int) []byte { + return AppendInt32(dst, int32(l)) +} + +// AppendCompactArrayLen appends the length of an array as a uvarint to dst +// as the length + 1. +// +// For KIP-482. +func AppendCompactArrayLen(dst []byte, l int) []byte { + return AppendUvarint(dst, 1+uint32(l)) +} + +// AppendNullableArrayLen appends the length of an array as an int32 to dst, +// or -1 if isNil is true. +func AppendNullableArrayLen(dst []byte, l int, isNil bool) []byte { + if isNil { + return AppendInt32(dst, -1) + } + return AppendInt32(dst, int32(l)) +} + +// AppendCompactNullableArrayLen appends the length of an array as a uvarint to +// dst as the length + 1; if isNil is true, this appends 0 as a uvarint. +// +// For KIP-482. +func AppendCompactNullableArrayLen(dst []byte, l int, isNil bool) []byte { + if isNil { + return AppendUvarint(dst, 0) + } + return AppendUvarint(dst, 1+uint32(l)) +} + +// Reader is used to decode Kafka messages. +// +// For all functions on Reader, if the reader has been invalidated, functions +// return defaults (false, 0, nil, ""). Use Complete to detect if the reader +// was invalidated or if the reader has remaining data. +type Reader struct { + Src []byte + bad bool +} + +// Bool returns a bool from the reader. +func (b *Reader) Bool() bool { + if len(b.Src) < 1 { + b.bad = true + b.Src = nil + return false + } + t := b.Src[0] != 0 // if '0', false + b.Src = b.Src[1:] + return t +} + +// Int8 returns an int8 from the reader. +func (b *Reader) Int8() int8 { + if len(b.Src) < 1 { + b.bad = true + b.Src = nil + return 0 + } + r := b.Src[0] + b.Src = b.Src[1:] + return int8(r) +} + +// Int16 returns an int16 from the reader. +func (b *Reader) Int16() int16 { + if len(b.Src) < 2 { + b.bad = true + b.Src = nil + return 0 + } + r := int16(binary.BigEndian.Uint16(b.Src)) + b.Src = b.Src[2:] + return r +} + +// Uint16 returns an uint16 from the reader. +func (b *Reader) Uint16() uint16 { + if len(b.Src) < 2 { + b.bad = true + b.Src = nil + return 0 + } + r := binary.BigEndian.Uint16(b.Src) + b.Src = b.Src[2:] + return r +} + +// Int32 returns an int32 from the reader. 
+func (b *Reader) Int32() int32 { + if len(b.Src) < 4 { + b.bad = true + b.Src = nil + return 0 + } + r := int32(binary.BigEndian.Uint32(b.Src)) + b.Src = b.Src[4:] + return r +} + +// Int64 returns an int64 from the reader. +func (b *Reader) Int64() int64 { + return int64(b.readUint64()) +} + +// Uuid returns a uuid from the reader. +func (b *Reader) Uuid() [16]byte { + var r [16]byte + copy(r[:], b.Span(16)) + return r +} + +// Float64 returns a float64 from the reader. +func (b *Reader) Float64() float64 { + return math.Float64frombits(b.readUint64()) +} + +func (b *Reader) readUint64() uint64 { + if len(b.Src) < 8 { + b.bad = true + b.Src = nil + return 0 + } + r := binary.BigEndian.Uint64(b.Src) + b.Src = b.Src[8:] + return r +} + +// Uint32 returns a uint32 from the reader. +func (b *Reader) Uint32() uint32 { + if len(b.Src) < 4 { + b.bad = true + b.Src = nil + return 0 + } + r := binary.BigEndian.Uint32(b.Src) + b.Src = b.Src[4:] + return r +} + +// Varint returns a varint int32 from the reader. +func (b *Reader) Varint() int32 { + val, n := Varint(b.Src) + if n <= 0 { + b.bad = true + b.Src = nil + return 0 + } + b.Src = b.Src[n:] + return val +} + +// Varlong returns a varlong int64 from the reader. +func (b *Reader) Varlong() int64 { + val, n := Varlong(b.Src) + if n <= 0 { + b.bad = true + b.Src = nil + return 0 + } + b.Src = b.Src[n:] + return val +} + +// Uvarint returns a uvarint encoded uint32 from the reader. +func (b *Reader) Uvarint() uint32 { + val, n := Uvarint(b.Src) + if n <= 0 { + b.bad = true + b.Src = nil + return 0 + } + b.Src = b.Src[n:] + return val +} + +// Span returns l bytes from the reader. +func (b *Reader) Span(l int) []byte { + if len(b.Src) < l || l < 0 { + b.bad = true + b.Src = nil + return nil + } + r := b.Src[:l:l] + b.Src = b.Src[l:] + return r +} + +// UnsafeString returns a Kafka string from the reader without allocating using +// the unsafe package. This must be used with care; note the string holds a +// reference to the original slice. +func (b *Reader) UnsafeString() string { + l := b.Int16() + return UnsafeString(b.Span(int(l))) +} + +// String returns a Kafka string from the reader. +func (b *Reader) String() string { + l := b.Int16() + return string(b.Span(int(l))) +} + +// UnsafeCompactString returns a Kafka compact string from the reader without +// allocating using the unsafe package. This must be used with care; note the +// string holds a reference to the original slice. +func (b *Reader) UnsafeCompactString() string { + l := int(b.Uvarint()) - 1 + return UnsafeString(b.Span(l)) +} + +// CompactString returns a Kafka compact string from the reader. +func (b *Reader) CompactString() string { + l := int(b.Uvarint()) - 1 + return string(b.Span(l)) +} + +// UnsafeNullableString returns a Kafka nullable string from the reader without +// allocating using the unsafe package. This must be used with care; note the +// string holds a reference to the original slice. +func (b *Reader) UnsafeNullableString() *string { + l := b.Int16() + if l < 0 { + return nil + } + s := UnsafeString(b.Span(int(l))) + return &s +} + +// NullableString returns a Kafka nullable string from the reader. +func (b *Reader) NullableString() *string { + l := b.Int16() + if l < 0 { + return nil + } + s := string(b.Span(int(l))) + return &s +} + +// UnsafeCompactNullableString returns a Kafka compact nullable string from the +// reader without allocating using the unsafe package. This must be used with +// care; note the string holds a reference to the original slice. 
+func (b *Reader) UnsafeCompactNullableString() *string { + l := int(b.Uvarint()) - 1 + if l < 0 { + return nil + } + s := UnsafeString(b.Span(l)) + return &s +} + +// CompactNullableString returns a Kafka compact nullable string from the +// reader. +func (b *Reader) CompactNullableString() *string { + l := int(b.Uvarint()) - 1 + if l < 0 { + return nil + } + s := string(b.Span(l)) + return &s +} + +// Bytes returns a Kafka byte array from the reader. +// +// This never returns nil. +func (b *Reader) Bytes() []byte { + l := b.Int32() + // This is not to spec, but it is not clearly documented and Microsoft + // EventHubs fails here. -1 means null, which should throw an + // exception. EventHubs uses -1 to mean "does not exist" on some + // non-nullable fields. + // + // Until EventHubs is fixed, we return an empty byte slice for null. + if l == -1 { + return []byte{} + } + return b.Span(int(l)) +} + +// CompactBytes returns a Kafka compact byte array from the reader. +// +// This never returns nil. +func (b *Reader) CompactBytes() []byte { + l := int(b.Uvarint()) - 1 + if l == -1 { // same as above: -1 should not be allowed here + return []byte{} + } + return b.Span(l) +} + +// NullableBytes returns a Kafka nullable byte array from the reader, returning +// nil as appropriate. +func (b *Reader) NullableBytes() []byte { + l := b.Int32() + if l < 0 { + return nil + } + r := b.Span(int(l)) + return r +} + +// CompactNullableBytes returns a Kafka compact nullable byte array from the +// reader, returning nil as appropriate. +func (b *Reader) CompactNullableBytes() []byte { + l := int(b.Uvarint()) - 1 + if l < 0 { + return nil + } + r := b.Span(l) + return r +} + +// ArrayLen returns a Kafka array length from the reader. +func (b *Reader) ArrayLen() int32 { + r := b.Int32() + // The min size of a Kafka type is a byte, so if we do not have + // at least the array length of bytes left, it is bad. + if len(b.Src) < int(r) { + b.bad = true + b.Src = nil + return 0 + } + return r +} + +// VarintArrayLen returns a Kafka array length from the reader. +func (b *Reader) VarintArrayLen() int32 { + r := b.Varint() + // The min size of a Kafka type is a byte, so if we do not have + // at least the array length of bytes left, it is bad. + if len(b.Src) < int(r) { + b.bad = true + b.Src = nil + return 0 + } + return r +} + +// CompactArrayLen returns a Kafka compact array length from the reader. +func (b *Reader) CompactArrayLen() int32 { + r := int32(b.Uvarint()) - 1 + // The min size of a Kafka type is a byte, so if we do not have + // at least the array length of bytes left, it is bad. + if len(b.Src) < int(r) { + b.bad = true + b.Src = nil + return 0 + } + return r +} + +// VarintBytes returns a Kafka encoded varint array from the reader, returning +// nil as appropriate. +func (b *Reader) VarintBytes() []byte { + l := b.Varint() + if l < 0 { + return nil + } + return b.Span(int(l)) +} + +// UnsafeVarintString returns a Kafka encoded varint string from the reader +// without allocating using the unsafe package. This must be used with care; +// note the string holds a reference to the original slice. +func (b *Reader) UnsafeVarintString() string { + return UnsafeString(b.VarintBytes()) +} + +// VarintString returns a Kafka encoded varint string from the reader. +func (b *Reader) VarintString() string { + return string(b.VarintBytes()) +} + +// Complete returns ErrNotEnoughData if the source ran out while decoding. 
+func (b *Reader) Complete() error { + if b.bad { + return ErrNotEnoughData + } + return nil +} + +// Ok returns true if the reader is still ok. +func (b *Reader) Ok() bool { + return !b.bad +} + +// UnsafeString returns the slice as a string using unsafe rule (6). +func UnsafeString(slice []byte) string { + var str string + strhdr := (*reflect.StringHeader)(unsafe.Pointer(&str)) //nolint:gosec // known way to convert slice to string + strhdr.Data = ((*reflect.SliceHeader)(unsafe.Pointer(&slice))).Data //nolint:gosec // known way to convert slice to string + strhdr.Len = len(slice) + return str +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kmsg/record.go b/vendor/github.com/twmb/franz-go/pkg/kmsg/record.go new file mode 100644 index 0000000..86499fd --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kmsg/record.go @@ -0,0 +1,174 @@ +package kmsg + +import "github.com/twmb/franz-go/pkg/kmsg/internal/kbin" + +// A Record is a Kafka v0.11.0.0 record. It corresponds to an individual +// message as it is written on the wire. +type Record struct { + // Length is the length of this record on the wire of everything that + // follows this field. It is an int32 encoded as a varint. + Length int32 + + // Attributes are record level attributes. This field currently is unused. + Attributes int8 + + // TimestampDelta is the millisecond delta of this record's timestamp + // from the record's RecordBatch's FirstTimestamp. + // + // NOTE: this is actually an int64 but we cannot change the type for + // backwards compatibility. Use TimestampDelta64. + TimestampDelta int32 + TimestampDelta64 int64 + + // OffsetDelta is the delta of this record's offset from the record's + // RecordBatch's FirstOffset. + // + // For producing, this is usually equal to the index of the record in + // the record batch. + OffsetDelta int32 + + // Key is an blob of data for a record. + // + // Key's are usually used for hashing the record to specific Kafka partitions. + Key []byte + + // Value is a blob of data. This field is the main "message" portion of a + // record. + Value []byte + + // Headers are optional user provided metadata for records. Unlike normal + // arrays, the number of headers is encoded as a varint. 
+ Headers []Header +} + +func (v *Record) AppendTo(dst []byte) []byte { + { + v := v.Length + dst = kbin.AppendVarint(dst, v) + } + { + v := v.Attributes + dst = kbin.AppendInt8(dst, v) + } + { + d := v.TimestampDelta64 + if d == 0 { + d = int64(v.TimestampDelta) + } + dst = kbin.AppendVarlong(dst, d) + } + { + v := v.OffsetDelta + dst = kbin.AppendVarint(dst, v) + } + { + v := v.Key + dst = kbin.AppendVarintBytes(dst, v) + } + { + v := v.Value + dst = kbin.AppendVarintBytes(dst, v) + } + { + v := v.Headers + dst = kbin.AppendVarint(dst, int32(len(v))) + for i := range v { + v := &v[i] + { + v := v.Key + dst = kbin.AppendVarintString(dst, v) + } + { + v := v.Value + dst = kbin.AppendVarintBytes(dst, v) + } + } + } + return dst +} + +func (v *Record) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *Record) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *Record) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + s := v + { + v := b.Varint() + s.Length = v + } + { + v := b.Int8() + s.Attributes = v + } + { + v := b.Varlong() + s.TimestampDelta64 = v + s.TimestampDelta = int32(v) + } + { + v := b.Varint() + s.OffsetDelta = v + } + { + v := b.VarintBytes() + s.Key = v + } + { + v := b.VarintBytes() + s.Value = v + } + { + v := s.Headers + a := v + var l int32 + l = b.VarintArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]Header, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + v = b.UnsafeVarintString() + } else { + v = b.VarintString() + } + s.Key = v + } + { + v := b.VarintBytes() + s.Value = v + } + } + v = a + s.Headers = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to Record. +func (v *Record) Default() { +} + +// NewRecord returns a default Record +// This is a shortcut for creating a struct and calling Default yourself. +func NewRecord() Record { + var v Record + v.Default() + return v +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 955c2e8..e3ab7cd 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -303,6 +303,13 @@ github.com/spf13/viper # github.com/stretchr/testify v1.8.1 ## explicit; go 1.13 github.com/stretchr/testify/assert +# github.com/twmb/franz-go v1.17.1 +## explicit; go 1.21 +github.com/twmb/franz-go/pkg/kbin +# github.com/twmb/franz-go/pkg/kmsg v1.8.0 +## explicit; go 1.19 +github.com/twmb/franz-go/pkg/kmsg +github.com/twmb/franz-go/pkg/kmsg/internal/kbin # github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c ## explicit github.com/xdg/scram
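
Illustrative usage (not part of the patch): the vendored kmsg.Record type above encodes every field with the kbin varint primitives, so a record can be round-tripped in memory with AppendTo and ReadFrom. This is only a sketch; it assumes the vendored packages are importable under the module paths listed in vendor/modules.txt, and that kmsg.Header carries a string Key and a []byte Value, consistent with the header encoding shown in record.go.

package main

import (
	"fmt"

	"github.com/twmb/franz-go/pkg/kmsg"
)

func main() {
	// Build a record; NewRecord applies defaults for forward compatibility.
	rec := kmsg.NewRecord()
	rec.Key = []byte("user-42")
	rec.Value = []byte(`{"event":"login"}`)
	// kmsg.Header field layout (Key string, Value []byte) is assumed here.
	rec.Headers = []kmsg.Header{{Key: "source", Value: []byte("proxy")}}

	// AppendTo writes the varint-encoded wire form defined in record.go.
	wire := rec.AppendTo(nil)

	// ReadFrom decodes the same layout; it returns ErrNotEnoughData
	// (via the kbin Reader's Complete check) if the buffer is truncated.
	var decoded kmsg.Record
	if err := decoded.ReadFrom(wire); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Printf("key=%s value=%s headers=%d\n", decoded.Key, decoded.Value, len(decoded.Headers))
}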