
Adding upstream version 1.34.4.

Signed-off-by: Daniel Baumann <daniel@debian.org>
Author: Daniel Baumann, 2025-05-24 07:26:29 +02:00
Parent: e393c3af3f
Commit: 4978089aab
Signed by: daniel, GPG key ID: FBB4F0E80A80222F
4963 changed files with 677545 additions and 0 deletions


@@ -0,0 +1,158 @@
package kafka

import (
	"errors"
	"math"
	"strings"
	"time"

	"github.com/IBM/sarama"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/config"
	"github.com/influxdata/telegraf/plugins/common/tls"
)

// ReadConfig for kafka clients meaning to read from Kafka.
type ReadConfig struct {
	Config
}

// SetConfig on the sarama.Config object from the ReadConfig struct.
func (k *ReadConfig) SetConfig(cfg *sarama.Config, log telegraf.Logger) error {
	cfg.Consumer.Return.Errors = true
	return k.Config.SetConfig(cfg, log)
}

// WriteConfig for kafka clients meaning to write to Kafka.
type WriteConfig struct {
	Config

	RequiredAcks     int  `toml:"required_acks"`
	MaxRetry         int  `toml:"max_retry"`
	MaxMessageBytes  int  `toml:"max_message_bytes"`
	IdempotentWrites bool `toml:"idempotent_writes"`
}

// SetConfig on the sarama.Config object from the WriteConfig struct.
func (k *WriteConfig) SetConfig(cfg *sarama.Config, log telegraf.Logger) error {
	cfg.Producer.Return.Successes = true
	cfg.Producer.Idempotent = k.IdempotentWrites
	cfg.Producer.Retry.Max = k.MaxRetry
	if k.MaxMessageBytes > 0 {
		cfg.Producer.MaxMessageBytes = k.MaxMessageBytes
	}
	cfg.Producer.RequiredAcks = sarama.RequiredAcks(k.RequiredAcks)
	if cfg.Producer.Idempotent {
		// Idempotent production requires at most one in-flight request.
		cfg.Net.MaxOpenRequests = 1
	}
	return k.Config.SetConfig(cfg, log)
}

// Config common to all Kafka clients.
type Config struct {
	SASLAuth
	tls.ClientConfig

	Version                  string           `toml:"version"`
	ClientID                 string           `toml:"client_id"`
	CompressionCodec         int              `toml:"compression_codec"`
	EnableTLS                *bool            `toml:"enable_tls"`
	KeepAlivePeriod          *config.Duration `toml:"keep_alive_period"`
	MetadataRetryMax         int              `toml:"metadata_retry_max"`
	MetadataRetryType        string           `toml:"metadata_retry_type"`
	MetadataRetryBackoff     config.Duration  `toml:"metadata_retry_backoff"`
	MetadataRetryMaxDuration config.Duration  `toml:"metadata_retry_max_duration"`

	// Disable full metadata fetching
	MetadataFull *bool `toml:"metadata_full"`
}

type BackoffFunc func(retries, maxRetries int) time.Duration

// makeBackoffFunc returns a BackoffFunc that doubles the wait on every retry
// and, if maxDuration is non-zero, caps the wait at maxDuration.
func makeBackoffFunc(backoff, maxDuration time.Duration) BackoffFunc {
	return func(retries, _ int) time.Duration {
		d := time.Duration(math.Pow(2, float64(retries))) * backoff
		if maxDuration != 0 && d > maxDuration {
			return maxDuration
		}
		return d
	}
}

// SetConfig on the sarama.Config object from the Config struct.
func (k *Config) SetConfig(cfg *sarama.Config, log telegraf.Logger) error {
	if k.Version != "" {
		version, err := sarama.ParseKafkaVersion(k.Version)
		if err != nil {
			return err
		}
		cfg.Version = version
	}

	if k.ClientID != "" {
		cfg.ClientID = k.ClientID
	} else {
		cfg.ClientID = "Telegraf"
	}

	cfg.Producer.Compression = sarama.CompressionCodec(k.CompressionCodec)

	if k.EnableTLS != nil && *k.EnableTLS {
		cfg.Net.TLS.Enable = true
	}

	tlsConfig, err := k.ClientConfig.TLSConfig()
	if err != nil {
		return err
	}

	if tlsConfig != nil {
		cfg.Net.TLS.Config = tlsConfig

		// To maintain backwards compatibility, if the enable_tls option is not
		// set TLS is enabled if a non-default TLS config is used.
		if k.EnableTLS == nil {
			cfg.Net.TLS.Enable = true
		}
	}

	if k.KeepAlivePeriod != nil {
		// Defaults to OS setting (15s currently)
		cfg.Net.KeepAlive = time.Duration(*k.KeepAlivePeriod)
	}

	if k.MetadataFull != nil {
		// Defaults to true in Sarama
		cfg.Metadata.Full = *k.MetadataFull
	}

	if k.MetadataRetryMax != 0 {
		cfg.Metadata.Retry.Max = k.MetadataRetryMax
	}

	if k.MetadataRetryBackoff != 0 {
		// If cfg.Metadata.Retry.BackoffFunc is set, sarama ignores
		// cfg.Metadata.Retry.Backoff
		cfg.Metadata.Retry.Backoff = time.Duration(k.MetadataRetryBackoff)
	}

	switch strings.ToLower(k.MetadataRetryType) {
	default:
		return errors.New("invalid metadata retry type")
	case "exponential":
		if k.MetadataRetryBackoff == 0 {
			k.MetadataRetryBackoff = config.Duration(250 * time.Millisecond)
			log.Warnf("metadata_retry_backoff is 0, using %s", time.Duration(k.MetadataRetryBackoff))
		}
		cfg.Metadata.Retry.BackoffFunc = makeBackoffFunc(
			time.Duration(k.MetadataRetryBackoff),
			time.Duration(k.MetadataRetryMaxDuration),
		)
	case "constant", "":
	}

	return k.SetSASLConfig(cfg)
}
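For context, a minimal sketch of how a producer-side caller could apply WriteConfig to a sarama client. The broker address, version, and client ID are illustrative, testutil.Logger merely stands in for a real plugin logger, and error handling is trimmed to the essentials.

package main

import (
	"log"

	"github.com/IBM/sarama"
	"github.com/influxdata/telegraf/plugins/common/kafka"
	"github.com/influxdata/telegraf/testutil"
)

func main() {
	// Illustrative values. Idempotent writes need acks from all in-sync
	// replicas (required_acks = -1) and a broker version of at least 0.11.
	wc := kafka.WriteConfig{
		Config: kafka.Config{
			Version:  "2.6.0",
			ClientID: "my-producer",
		},
		RequiredAcks:     -1,
		MaxRetry:         3,
		IdempotentWrites: true,
	}

	cfg := sarama.NewConfig()
	if err := wc.SetConfig(cfg, testutil.Logger{}); err != nil {
		log.Fatal(err)
	}

	// cfg now returns successes, caps in-flight requests at one (required
	// for idempotence), and waits for all in-sync replicas to ack.
	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()
}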


@@ -0,0 +1,22 @@
package kafka

import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

func TestBackoffFunc(t *testing.T) {
	b := 250 * time.Millisecond
	limit := 1100 * time.Millisecond

	f := makeBackoffFunc(b, limit)
	require.Equal(t, b, f(0, 0))
	require.Equal(t, b*2, f(1, 0))
	require.Equal(t, b*4, f(2, 0))
	require.Equal(t, limit, f(3, 0)) // would be 2000 but that's greater than max

	f = makeBackoffFunc(b, 0)      // max = 0 means no max
	require.Equal(t, b*8, f(3, 0)) // with no max, it's 2000
}
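With the 250 ms base used here, the uncapped sequence is 250 ms, 500 ms, 1000 ms, 2000 ms (base times 2^retries), so the 1100 ms limit clips the fourth value while the zero-limit variant lets it reach the full 2000 ms.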


@@ -0,0 +1,41 @@
package kafka

import (
	"sync"

	"github.com/IBM/sarama"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/logger"
)

var (
	log  = logger.New("sarama", "", "")
	once sync.Once
)

type debugLogger struct{}

func (*debugLogger) Print(v ...interface{}) {
	log.Trace(v...)
}

func (*debugLogger) Printf(format string, v ...interface{}) {
	log.Tracef(format, v...)
}

func (l *debugLogger) Println(v ...interface{}) {
	l.Print(v...)
}

// SetLogger configures a debug logger for kafka (sarama)
func SetLogger(level telegraf.LogLevel) {
	// Set-up the sarama logger only once
	once.Do(func() {
		sarama.Logger = &debugLogger{}
	})

	// Increase the log-level if needed.
	if !log.Level().Includes(level) {
		log.SetLevel(level)
	}
}
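A minimal sketch of turning on sarama's internal logging from a consuming package; the package name and function are hypothetical, and the chosen level is illustrative.

package myplugin // hypothetical importer of the common kafka package

import (
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/common/kafka"
)

func enableSaramaDebug() {
	// Swaps sarama.Logger in exactly once; repeated calls only ever
	// raise the log level, never lower it.
	kafka.SetLogger(telegraf.Debug)
}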


@@ -0,0 +1,127 @@
package kafka

import (
	"errors"
	"fmt"

	"github.com/IBM/sarama"

	"github.com/influxdata/telegraf/config"
)

type SASLAuth struct {
	SASLUsername   config.Secret     `toml:"sasl_username"`
	SASLPassword   config.Secret     `toml:"sasl_password"`
	SASLExtensions map[string]string `toml:"sasl_extensions"`
	SASLMechanism  string            `toml:"sasl_mechanism"`
	SASLVersion    *int              `toml:"sasl_version"`

	// GSSAPI config
	SASLGSSAPIServiceName        string `toml:"sasl_gssapi_service_name"`
	SASLGSSAPIAuthType           string `toml:"sasl_gssapi_auth_type"`
	SASLGSSAPIDisablePAFXFAST    bool   `toml:"sasl_gssapi_disable_pafxfast"`
	SASLGSSAPIKerberosConfigPath string `toml:"sasl_gssapi_kerberos_config_path"`
	SASLGSSAPIKeyTabPath         string `toml:"sasl_gssapi_key_tab_path"`
	SASLGSSAPIRealm              string `toml:"sasl_gssapi_realm"`

	// OAUTHBEARER config
	SASLAccessToken config.Secret `toml:"sasl_access_token"`
}

// SetSASLConfig configures SASL for kafka (sarama)
func (k *SASLAuth) SetSASLConfig(cfg *sarama.Config) error {
	username, err := k.SASLUsername.Get()
	if err != nil {
		return fmt.Errorf("getting username failed: %w", err)
	}
	cfg.Net.SASL.User = username.String()
	defer username.Destroy()

	password, err := k.SASLPassword.Get()
	if err != nil {
		return fmt.Errorf("getting password failed: %w", err)
	}
	cfg.Net.SASL.Password = password.String()
	defer password.Destroy()

	if k.SASLMechanism != "" {
		cfg.Net.SASL.Mechanism = sarama.SASLMechanism(k.SASLMechanism)
		switch cfg.Net.SASL.Mechanism {
		case sarama.SASLTypeSCRAMSHA256:
			cfg.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient {
				return &XDGSCRAMClient{HashGeneratorFcn: SHA256}
			}
		case sarama.SASLTypeSCRAMSHA512:
			cfg.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient {
				return &XDGSCRAMClient{HashGeneratorFcn: SHA512}
			}
		case sarama.SASLTypeOAuth:
			cfg.Net.SASL.TokenProvider = k // use self as token provider.
		case sarama.SASLTypeGSSAPI:
			cfg.Net.SASL.GSSAPI.ServiceName = k.SASLGSSAPIServiceName
			cfg.Net.SASL.GSSAPI.AuthType = gssapiAuthType(k.SASLGSSAPIAuthType)
			cfg.Net.SASL.GSSAPI.Username = username.String()
			cfg.Net.SASL.GSSAPI.Password = password.String()
			cfg.Net.SASL.GSSAPI.DisablePAFXFAST = k.SASLGSSAPIDisablePAFXFAST
			cfg.Net.SASL.GSSAPI.KerberosConfigPath = k.SASLGSSAPIKerberosConfigPath
			cfg.Net.SASL.GSSAPI.KeyTabPath = k.SASLGSSAPIKeyTabPath
			cfg.Net.SASL.GSSAPI.Realm = k.SASLGSSAPIRealm
		case sarama.SASLTypePlaintext:
			// nothing.
		default:
		}
	}

	if !k.SASLUsername.Empty() || k.SASLMechanism != "" {
		cfg.Net.SASL.Enable = true

		version, err := SASLVersion(cfg.Version, k.SASLVersion)
		if err != nil {
			return err
		}
		cfg.Net.SASL.Version = version
	}
	return nil
}

// Token does nothing smart, it just grabs a hard-coded token from config.
func (k *SASLAuth) Token() (*sarama.AccessToken, error) {
	token, err := k.SASLAccessToken.Get()
	if err != nil {
		return nil, fmt.Errorf("getting token failed: %w", err)
	}
	defer token.Destroy()

	return &sarama.AccessToken{
		Token:      token.String(),
		Extensions: k.SASLExtensions,
	}, nil
}

func SASLVersion(kafkaVersion sarama.KafkaVersion, saslVersion *int) (int16, error) {
	if saslVersion == nil {
		if kafkaVersion.IsAtLeast(sarama.V1_0_0_0) {
			return sarama.SASLHandshakeV1, nil
		}
		return sarama.SASLHandshakeV0, nil
	}

	switch *saslVersion {
	case 0:
		return sarama.SASLHandshakeV0, nil
	case 1:
		return sarama.SASLHandshakeV1, nil
	default:
		return 0, errors.New("invalid SASL version")
	}
}

func gssapiAuthType(authType string) int {
	switch authType {
	case "KRB5_USER_AUTH":
		return sarama.KRB5_USER_AUTH
	case "KRB5_KEYTAB_AUTH":
		return sarama.KRB5_KEYTAB_AUTH
	default:
		return 0
	}
}
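As an illustration, a small sketch wiring SCRAM-SHA-512 credentials through SetSASLConfig; the username and password literals are placeholders, and a real plugin would populate the secrets from its TOML configuration.

package main

import (
	"log"

	"github.com/IBM/sarama"
	"github.com/influxdata/telegraf/config"
	"github.com/influxdata/telegraf/plugins/common/kafka"
)

func main() {
	auth := kafka.SASLAuth{
		// Placeholder credentials; real plugins read these from TOML secrets.
		SASLUsername:  config.NewSecret([]byte("alice")),
		SASLPassword:  config.NewSecret([]byte("password")),
		SASLMechanism: string(sarama.SASLTypeSCRAMSHA512),
	}

	cfg := sarama.NewConfig()
	if err := auth.SetSASLConfig(cfg); err != nil {
		log.Fatal(err)
	}
	// cfg.Net.SASL.Enable is now true, with a SCRAM-SHA-512 client
	// generator installed and the handshake version derived from
	// cfg.Version via SASLVersion.
}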


@@ -0,0 +1,35 @@
package kafka

import (
	"crypto/sha256"
	"crypto/sha512"
	"hash"

	"github.com/xdg/scram"
)

var SHA256 scram.HashGeneratorFcn = func() hash.Hash { return sha256.New() }
var SHA512 scram.HashGeneratorFcn = func() hash.Hash { return sha512.New() }

// XDGSCRAMClient wraps the xdg/scram client so it satisfies sarama's
// SCRAMClient interface (Begin/Step/Done).
type XDGSCRAMClient struct {
	*scram.Client
	*scram.ClientConversation
	scram.HashGeneratorFcn
}

// Begin starts a new SCRAM conversation for the given credentials.
func (x *XDGSCRAMClient) Begin(userName, password, authzID string) (err error) {
	x.Client, err = x.HashGeneratorFcn.NewClient(userName, password, authzID)
	if err != nil {
		return err
	}
	x.ClientConversation = x.Client.NewConversation()
	return nil
}

// Step takes the server's challenge and returns the client's response.
func (x *XDGSCRAMClient) Step(challenge string) (response string, err error) {
	return x.ClientConversation.Step(challenge)
}

// Done reports whether the SCRAM exchange has completed.
func (x *XDGSCRAMClient) Done() bool {
	return x.ClientConversation.Done()
}
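To make the Begin/Step/Done flow concrete, a short sketch that starts a SCRAM-SHA-256 conversation and prints the client-first message; the credentials are placeholders, and completing the exchange would require a broker's server-first and server-final replies.

package main

import (
	"fmt"
	"log"

	"github.com/influxdata/telegraf/plugins/common/kafka"
)

func main() {
	c := &kafka.XDGSCRAMClient{HashGeneratorFcn: kafka.SHA256}
	if err := c.Begin("alice", "password", ""); err != nil {
		log.Fatal(err)
	}

	// The first Step with an empty challenge yields the client-first
	// message, e.g. "n,,n=alice,r=<nonce>".
	first, err := c.Step("")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(first)
	fmt.Println(c.Done()) // false: the server's replies are still pending
}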