
Adding upstream version 1.34.4.

Signed-off-by: Daniel Baumann <daniel@debian.org>
Daniel Baumann 2025-05-24 07:26:29 +02:00
parent e393c3af3f
commit 4978089aab
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
4963 changed files with 677545 additions and 0 deletions


@@ -0,0 +1,23 @@
package ratelimiter

import (
	"errors"
	"time"

	"github.com/influxdata/telegraf/config"
)

type RateLimitConfig struct {
	Limit  config.Size     `toml:"rate_limit"`
	Period config.Duration `toml:"rate_limit_period"`
}

func (cfg *RateLimitConfig) CreateRateLimiter() (*RateLimiter, error) {
	if cfg.Limit > 0 && cfg.Period <= 0 {
		return nil, errors.New("invalid period for rate-limit")
	}
	return &RateLimiter{
		limit:  int64(cfg.Limit),
		period: time.Duration(cfg.Period),
	}, nil
}
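
For context, a minimal usage sketch (not part of this commit): a plugin embeds RateLimitConfig in its options and builds the limiter once during initialization. The helper name and the concrete values below are illustrative only.

package ratelimiter

import (
	"time"

	"github.com/influxdata/telegraf/config"
)

// exampleSetup is a hypothetical helper (not part of this commit) showing how
// the TOML-mapped config is turned into a limiter at plugin startup.
func exampleSetup() (*RateLimiter, error) {
	cfg := RateLimitConfig{
		Limit:  config.Size(1024),                // bytes allowed per period
		Period: config.Duration(5 * time.Minute), // length of the accounting period
	}
	return cfg.CreateRateLimiter()
}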


@@ -0,0 +1,66 @@
package ratelimiter

import (
	"errors"
	"math"
	"time"
)

var (
	ErrLimitExceeded = errors.New("not enough tokens")
)

type RateLimiter struct {
	limit       int64
	period      time.Duration
	periodStart time.Time
	remaining   int64
}

func (r *RateLimiter) Remaining(t time.Time) int64 {
	if r.limit == 0 {
		return math.MaxInt64
	}

	// Check for corner case
	if !r.periodStart.Before(t) {
		return 0
	}

	// We are in a new period, so the complete size is available
	deltat := t.Sub(r.periodStart)
	if deltat >= r.period {
		return r.limit
	}

	return r.remaining
}

func (r *RateLimiter) Accept(t time.Time, used int64) {
	if r.limit == 0 || r.periodStart.After(t) {
		return
	}

	// Remember the first query and reset if we are in a new period
	if r.periodStart.IsZero() {
		r.periodStart = t
		r.remaining = r.limit
	} else if deltat := t.Sub(r.periodStart); deltat >= r.period {
		r.periodStart = r.periodStart.Add(deltat.Truncate(r.period))
		r.remaining = r.limit
	}

	// Update the state
	r.remaining = max(r.remaining-used, 0)
}

func (r *RateLimiter) Undo(t time.Time, used int64) {
	// Do nothing if we are not in the current period or unlimited because we
	// already reset the limit on a new window.
	if r.limit == 0 || r.periodStart.IsZero() || r.periodStart.After(t) || t.Sub(r.periodStart) >= r.period {
		return
	}

	// Undo the state update
	r.remaining = min(r.remaining+used, r.limit)
}
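
A sketch of the intended call pattern, inferred from the method set above (the send callback and helper name are stand-ins, not part of this commit): query the remaining budget, book what is about to be written, and roll the accounting back if the write fails.

package ratelimiter

import "time"

// exampleSend is a hypothetical write helper (not part of this commit)
// illustrating the Remaining / Accept / Undo cycle against a single timestamp.
func exampleSend(limiter *RateLimiter, payload []byte, send func([]byte) error) error {
	ts := time.Now()
	used := int64(len(payload))
	if used > limiter.Remaining(ts) {
		return ErrLimitExceeded
	}

	// Book the tokens for this period ...
	limiter.Accept(ts, used)
	if err := send(payload); err != nil {
		// ... and hand them back if nothing was actually transmitted.
		limiter.Undo(ts, used)
		return err
	}
	return nil
}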


@@ -0,0 +1,189 @@
package ratelimiter

import (
	"math"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/influxdata/telegraf/config"
)

func TestInvalidPeriod(t *testing.T) {
	cfg := &RateLimitConfig{Limit: config.Size(1024)}
	_, err := cfg.CreateRateLimiter()
	require.ErrorContains(t, err, "invalid period for rate-limit")
}

func TestUnlimited(t *testing.T) {
	cfg := &RateLimitConfig{}
	limiter, err := cfg.CreateRateLimiter()
	require.NoError(t, err)

	start := time.Now()
	end := start.Add(30 * time.Minute)
	for ts := start; ts.Before(end); ts = ts.Add(1 * time.Minute) {
		require.EqualValues(t, int64(math.MaxInt64), limiter.Remaining(ts))
	}
}

func TestUnlimitedWithPeriod(t *testing.T) {
	cfg := &RateLimitConfig{
		Period: config.Duration(5 * time.Minute),
	}
	limiter, err := cfg.CreateRateLimiter()
	require.NoError(t, err)

	start := time.Now()
	end := start.Add(30 * time.Minute)
	for ts := start; ts.Before(end); ts = ts.Add(1 * time.Minute) {
		require.EqualValues(t, int64(math.MaxInt64), limiter.Remaining(ts))
	}
}

func TestLimited(t *testing.T) {
	tests := []struct {
		name     string
		cfg      *RateLimitConfig
		step     time.Duration
		request  []int64
		expected []int64
	}{
		{
			name: "constant usage",
			cfg: &RateLimitConfig{
				Limit:  config.Size(1024),
				Period: config.Duration(5 * time.Minute),
			},
			step:     time.Minute,
			request:  []int64{300},
			expected: []int64{1024, 724, 424, 124, 0, 1024, 724, 424, 124, 0},
		},
		{
			name: "variable usage",
			cfg: &RateLimitConfig{
				Limit:  config.Size(1024),
				Period: config.Duration(5 * time.Minute),
			},
			step:     time.Minute,
			request:  []int64{256, 128, 512, 64, 64, 1024, 0, 0, 0, 0, 128, 4096, 4096, 4096, 4096, 4096},
			expected: []int64{1024, 768, 640, 128, 64, 1024, 0, 0, 0, 0, 1024, 896, 0, 0, 0, 1024},
		},
	}

	// Run the test with an offset of period multiples
	for _, tt := range tests {
		t.Run(tt.name+" at period", func(t *testing.T) {
			// Setup the limiter
			limiter, err := tt.cfg.CreateRateLimiter()
			require.NoError(t, err)

			// Compute the actual values
			start := time.Now().Truncate(tt.step)
			for i, expected := range tt.expected {
				ts := start.Add(time.Duration(i) * tt.step)
				remaining := limiter.Remaining(ts)
				use := min(remaining, tt.request[i%len(tt.request)])
				require.Equalf(t, expected, remaining, "mismatch at index %d", i)
				limiter.Accept(ts, use)
			}
		})
	}

	// Run the test at a time of period multiples
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Setup the limiter
			limiter, err := tt.cfg.CreateRateLimiter()
			require.NoError(t, err)

			// Compute the actual values
			start := time.Now().Truncate(tt.step).Add(1 * time.Second)
			for i, expected := range tt.expected {
				ts := start.Add(time.Duration(i) * tt.step)
				remaining := limiter.Remaining(ts)
				use := min(remaining, tt.request[i%len(tt.request)])
				require.Equalf(t, expected, remaining, "mismatch at index %d", i)
				limiter.Accept(ts, use)
			}
		})
	}
}

func TestUndo(t *testing.T) {
	tests := []struct {
		name     string
		cfg      *RateLimitConfig
		step     time.Duration
		request  []int64
		expected []int64
	}{
		{
			name: "constant usage",
			cfg: &RateLimitConfig{
				Limit:  config.Size(1024),
				Period: config.Duration(5 * time.Minute),
			},
			step:     time.Minute,
			request:  []int64{300},
			expected: []int64{1024, 724, 424, 124, 124, 1024, 724, 424, 124, 124},
		},
		{
			name: "variable usage",
			cfg: &RateLimitConfig{
				Limit:  config.Size(1024),
				Period: config.Duration(5 * time.Minute),
			},
			step:     time.Minute,
			request:  []int64{256, 128, 512, 64, 64, 1024, 0, 0, 0, 0, 128, 4096, 4096, 4096, 4096, 4096},
			expected: []int64{1024, 768, 640, 128, 64, 1024, 0, 0, 0, 0, 1024, 896, 896, 896, 896, 1024},
		},
	}

	// Run the test with an offset of period multiples
	for _, tt := range tests {
		t.Run(tt.name+" at period", func(t *testing.T) {
			// Setup the limiter
			limiter, err := tt.cfg.CreateRateLimiter()
			require.NoError(t, err)

			// Compute the actual values
			start := time.Now().Truncate(tt.step)
			for i, expected := range tt.expected {
				ts := start.Add(time.Duration(i) * tt.step)
				remaining := limiter.Remaining(ts)
				use := min(remaining, tt.request[i%len(tt.request)])
				require.Equalf(t, expected, remaining, "mismatch at index %d", i)
				limiter.Accept(ts, use)

				// Undo too large operations
				if tt.request[i%len(tt.request)] > remaining {
					limiter.Undo(ts, use)
				}
			}
		})
	}

	// Run the test at a time of period multiples
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Setup the limiter
			limiter, err := tt.cfg.CreateRateLimiter()
			require.NoError(t, err)

			// Compute the actual values
			start := time.Now().Truncate(tt.step).Add(1 * time.Second)
			for i, expected := range tt.expected {
				ts := start.Add(time.Duration(i) * tt.step)
				remaining := limiter.Remaining(ts)
				use := min(remaining, tt.request[i%len(tt.request)])
				require.Equalf(t, expected, remaining, "mismatch at index %d", i)
				limiter.Accept(ts, use)

				// Undo too large operations
				if tt.request[i%len(tt.request)] > remaining {
					limiter.Undo(ts, use)
				}
			}
		})
	}
}


@@ -0,0 +1,100 @@
package ratelimiter

import (
	"bytes"
	"math"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
)

// Serializer interface abstracting the different implementations of a
// limited-size serializer
type Serializer interface {
	Serialize(metric telegraf.Metric, limit int64) ([]byte, error)
	SerializeBatch(metrics []telegraf.Metric, limit int64) ([]byte, error)
}

// Individual serializers do serialize each metric individually using the
// serializer's Serialize() function and add the resulting output to the buffer
// until the limit is reached. This only works for serializers NOT requiring
// the serialization of a batch as-a-whole.
type IndividualSerializer struct {
	serializer telegraf.Serializer
	buffer     *bytes.Buffer
}

func NewIndividualSerializer(s telegraf.Serializer) *IndividualSerializer {
	return &IndividualSerializer{
		serializer: s,
		buffer:     &bytes.Buffer{},
	}
}

func (s *IndividualSerializer) Serialize(metric telegraf.Metric, limit int64) ([]byte, error) {
	// Do the serialization
	buf, err := s.serializer.Serialize(metric)
	if err != nil {
		return nil, err
	}

	// The serialized metric fits into the limit, so output it
	if buflen := int64(len(buf)); buflen <= limit {
		return buf, nil
	}

	// The serialized metric exceeds the limit
	return nil, internal.ErrSizeLimitReached
}

func (s *IndividualSerializer) SerializeBatch(metrics []telegraf.Metric, limit int64) ([]byte, error) {
	// Grow the buffer so it can hold at least the required size. This saves
	// us from reallocating often.
	s.buffer.Reset()
	if limit > 0 && limit > int64(s.buffer.Cap()) && limit < int64(math.MaxInt) {
		s.buffer.Grow(int(limit))
	}

	// Prepare a potential write error and be optimistic
	werr := &internal.PartialWriteError{
		MetricsAccept: make([]int, 0, len(metrics)),
	}

	// Iterate through the metrics, serialize them and add them to the output
	// buffer if they are within the size limit.
	var used int64
	for i, m := range metrics {
		buf, err := s.serializer.Serialize(m)
		if err != nil {
			// Failing serialization is a fatal error so mark the metric as such
			werr.Err = internal.ErrSerialization
			werr.MetricsReject = append(werr.MetricsReject, i)
			werr.MetricsRejectErrors = append(werr.MetricsRejectErrors, err)
			continue
		}

		// The serialized metric fits into the limit, so add it to the output
		if usedAdded := used + int64(len(buf)); usedAdded <= limit {
			if _, err := s.buffer.Write(buf); err != nil {
				return nil, err
			}
			werr.MetricsAccept = append(werr.MetricsAccept, i)
			used = usedAdded
			continue
		}

		// Return only the size-limit-reached error if all metrics failed.
		if used == 0 {
			return nil, internal.ErrSizeLimitReached
		}

		// Adding the serialized metric would exceed the limit, so exit with a
		// PartialWriteError and fill in the required information
		werr.Err = internal.ErrSizeLimitReached
		break
	}
	if werr.Err != nil {
		return s.buffer.Bytes(), werr
	}
	return s.buffer.Bytes(), nil
}
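
A sketch of how the limiter and the size-limited serializer fit together in an output's write path (assumed wiring; the send callback and helper name are illustrative, not part of this commit): the remaining budget caps the batch size, SerializeBatch packs as many metrics as fit, and a PartialWriteError reports which metrics made it into the payload.

package ratelimiter

import (
	"time"

	"github.com/influxdata/telegraf"
)

// exampleWrite is a hypothetical output write path (not part of this commit)
// combining the rate limiter with the size-limited batch serializer.
func exampleWrite(limiter *RateLimiter, s Serializer, metrics []telegraf.Metric, send func([]byte) error) error {
	ts := time.Now()

	// Serialize at most as many bytes as the current period still allows.
	buf, serr := s.SerializeBatch(metrics, limiter.Remaining(ts))
	if len(buf) == 0 {
		return serr
	}
	if err := send(buf); err != nil {
		return err
	}

	// Account for the bytes actually handed to the sink; a remaining
	// PartialWriteError tells the caller which metrics still need a retry.
	limiter.Accept(ts, int64(len(buf)))
	return serr
}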


@@ -0,0 +1,352 @@
package ratelimiter

import (
	"math"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/metric"
	"github.com/influxdata/telegraf/plugins/serializers/influx"
)

func TestIndividualSerializer(t *testing.T) {
	input := []telegraf.Metric{
		metric.New(
			"serializer_test",
			map[string]string{
				"source":   "localhost",
				"location": "factory_north",
				"machine":  "A",
				"status":   "ok",
			},
			map[string]interface{}{
				"operating_hours": 123,
				"temperature":     25.0,
				"pressure":        1023.4,
			},
			time.Unix(1722443551, 0),
		),
		metric.New(
			"serializer_test",
			map[string]string{
				"source":   "localhost",
				"location": "factory_north",
				"machine":  "B",
				"status":   "failed",
			},
			map[string]interface{}{
				"operating_hours": 8430,
				"temperature":     65.2,
				"pressure":        985.9,
			},
			time.Unix(1722443554, 0),
		),
		metric.New(
			"serializer_test",
			map[string]string{
				"source":   "localhost",
				"location": "factory_north",
				"machine":  "C",
				"status":   "warning",
			},
			map[string]interface{}{
				"operating_hours": 6765,
				"temperature":     42.5,
				"pressure":        986.1,
			},
			time.Unix(1722443555, 0),
		),
		metric.New(
			"device",
			map[string]string{
				"source":   "localhost",
				"location": "factory_north",
			},
			map[string]interface{}{
				"status": "ok",
			},
			time.Unix(1722443556, 0),
		),
		metric.New(
			"serializer_test",
			map[string]string{
				"source":   "gateway_af43e",
				"location": "factory_south",
				"machine":  "A",
				"status":   "ok",
			},
			map[string]interface{}{
				"operating_hours": 5544,
				"temperature":     18.6,
				"pressure":        1069.4,
			},
			time.Unix(1722443552, 0),
		),
		metric.New(
			"serializer_test",
			map[string]string{
				"source":   "gateway_af43e",
				"location": "factory_south",
				"machine":  "B",
				"status":   "ok",
			},
			map[string]interface{}{
				"operating_hours": 65,
				"temperature":     29.7,
				"pressure":        1101.2,
			},
			time.Unix(1722443553, 0),
		),
		metric.New(
			"device",
			map[string]string{
				"source":   "gateway_af43e",
				"location": "factory_south",
			},
			map[string]interface{}{
				"status": "ok",
			},
			time.Unix(1722443559, 0),
		),
		metric.New(
			"serializer_test",
			map[string]string{
				"source":   "gateway_af43e",
				"location": "factory_south",
				"machine":  "C",
				"status":   "off",
			},
			map[string]interface{}{
				"operating_hours": 0,
				"temperature":     0.0,
				"pressure":        0.0,
			},
			time.Unix(1722443562, 0),
		),
	}

	//nolint:lll // Resulting metrics should not be wrapped for readability
	expected := []string{
		"serializer_test,location=factory_north,machine=A,source=localhost,status=ok operating_hours=123i,pressure=1023.4,temperature=25 1722443551000000000\n" +
			"serializer_test,location=factory_north,machine=B,source=localhost,status=failed operating_hours=8430i,pressure=985.9,temperature=65.2 1722443554000000000\n",
		"serializer_test,location=factory_north,machine=C,source=localhost,status=warning operating_hours=6765i,pressure=986.1,temperature=42.5 1722443555000000000\n" +
			"device,location=factory_north,source=localhost status=\"ok\" 1722443556000000000\n" +
			"serializer_test,location=factory_south,machine=A,source=gateway_af43e,status=ok operating_hours=5544i,pressure=1069.4,temperature=18.6 1722443552000000000\n",
		"serializer_test,location=factory_south,machine=B,source=gateway_af43e,status=ok operating_hours=65i,pressure=1101.2,temperature=29.7 1722443553000000000\n" +
			"device,location=factory_south,source=gateway_af43e status=\"ok\" 1722443559000000000\n" +
			"serializer_test,location=factory_south,machine=C,source=gateway_af43e,status=off operating_hours=0i,pressure=0,temperature=0 1722443562000000000\n",
	}

	// Setup the limited serializer
	s := &influx.Serializer{SortFields: true}
	require.NoError(t, s.Init())
	serializer := NewIndividualSerializer(s)

	var werr *internal.PartialWriteError

	// Do the first serialization runs with all metrics
	buf, err := serializer.SerializeBatch(input, 400)
	require.ErrorAs(t, err, &werr)
	require.ErrorIs(t, werr.Err, internal.ErrSizeLimitReached)
	require.EqualValues(t, []int{0, 1}, werr.MetricsAccept)
	require.Empty(t, werr.MetricsReject)
	require.Equal(t, expected[0], string(buf))

	// Run again with the successful metrics removed
	buf, err = serializer.SerializeBatch(input[2:], 400)
	require.ErrorAs(t, err, &werr)
	require.ErrorIs(t, werr.Err, internal.ErrSizeLimitReached)
	require.EqualValues(t, []int{0, 1, 2}, werr.MetricsAccept)
	require.Empty(t, werr.MetricsReject)
	require.Equal(t, expected[1], string(buf))

	// Final run with the successful metrics removed
	buf, err = serializer.SerializeBatch(input[5:], 400)
	require.NoError(t, err)
	require.Equal(t, expected[2], string(buf))
}

func TestIndividualSerializerFirstTooBig(t *testing.T) {
	input := []telegraf.Metric{
		metric.New(
			"serializer_test",
			map[string]string{
				"source":   "localhost",
				"location": "factory_north",
				"machine":  "A",
				"status":   "ok",
			},
			map[string]interface{}{
				"operating_hours": 123,
				"temperature":     25.0,
				"pressure":        1023.4,
			},
			time.Unix(1722443551, 0),
		),
		metric.New(
			"serializer_test",
			map[string]string{
				"source":   "localhost",
				"location": "factory_north",
				"machine":  "B",
				"status":   "failed",
			},
			map[string]interface{}{
				"operating_hours": 8430,
				"temperature":     65.2,
				"pressure":        985.9,
			},
			time.Unix(1722443554, 0),
		),
	}

	// Setup the limited serializer
	s := &influx.Serializer{SortFields: true}
	require.NoError(t, s.Init())
	serializer := NewIndividualSerializer(s)

	// The first metric will already exceed the size so all metrics fail and
	// we expect a shortcut error.
	buf, err := serializer.SerializeBatch(input, 100)
	require.ErrorIs(t, err, internal.ErrSizeLimitReached)
	require.Empty(t, buf)
}

func TestIndividualSerializerUnlimited(t *testing.T) {
	input := []telegraf.Metric{
		metric.New(
			"serializer_test",
			map[string]string{
				"source":   "localhost",
				"location": "factory_north",
				"machine":  "A",
				"status":   "ok",
			},
			map[string]interface{}{
				"operating_hours": 123,
				"temperature":     25.0,
				"pressure":        1023.4,
			},
			time.Unix(1722443551, 0),
		),
		metric.New(
			"serializer_test",
			map[string]string{
				"source":   "localhost",
				"location": "factory_north",
				"machine":  "B",
				"status":   "failed",
			},
			map[string]interface{}{
				"operating_hours": 8430,
				"temperature":     65.2,
				"pressure":        985.9,
			},
			time.Unix(1722443554, 0),
		),
		metric.New(
			"serializer_test",
			map[string]string{
				"source":   "localhost",
				"location": "factory_north",
				"machine":  "C",
				"status":   "warning",
			},
			map[string]interface{}{
				"operating_hours": 6765,
				"temperature":     42.5,
				"pressure":        986.1,
			},
			time.Unix(1722443555, 0),
		),
		metric.New(
			"device",
			map[string]string{
				"source":   "localhost",
				"location": "factory_north",
			},
			map[string]interface{}{
				"status": "ok",
			},
			time.Unix(1722443556, 0),
		),
		metric.New(
			"serializer_test",
			map[string]string{
				"source":   "gateway_af43e",
				"location": "factory_south",
				"machine":  "A",
				"status":   "ok",
			},
			map[string]interface{}{
				"operating_hours": 5544,
				"temperature":     18.6,
				"pressure":        1069.4,
			},
			time.Unix(1722443552, 0),
		),
		metric.New(
			"serializer_test",
			map[string]string{
				"source":   "gateway_af43e",
				"location": "factory_south",
				"machine":  "B",
				"status":   "ok",
			},
			map[string]interface{}{
				"operating_hours": 65,
				"temperature":     29.7,
				"pressure":        1101.2,
			},
			time.Unix(1722443553, 0),
		),
		metric.New(
			"device",
			map[string]string{
				"source":   "gateway_af43e",
				"location": "factory_south",
			},
			map[string]interface{}{
				"status": "ok",
			},
			time.Unix(1722443559, 0),
		),
		metric.New(
			"serializer_test",
			map[string]string{
				"source":   "gateway_af43e",
				"location": "factory_south",
				"machine":  "C",
				"status":   "off",
			},
			map[string]interface{}{
				"operating_hours": 0,
				"temperature":     0.0,
				"pressure":        0.0,
			},
			time.Unix(1722443562, 0),
		),
	}

	//nolint:lll // Resulting metrics should not be wrapped for readability
	expected := "serializer_test,location=factory_north,machine=A,source=localhost,status=ok operating_hours=123i,pressure=1023.4,temperature=25 1722443551000000000\n" +
		"serializer_test,location=factory_north,machine=B,source=localhost,status=failed operating_hours=8430i,pressure=985.9,temperature=65.2 1722443554000000000\n" +
		"serializer_test,location=factory_north,machine=C,source=localhost,status=warning operating_hours=6765i,pressure=986.1,temperature=42.5 1722443555000000000\n" +
		"device,location=factory_north,source=localhost status=\"ok\" 1722443556000000000\n" +
		"serializer_test,location=factory_south,machine=A,source=gateway_af43e,status=ok operating_hours=5544i,pressure=1069.4,temperature=18.6 1722443552000000000\n" +
		"serializer_test,location=factory_south,machine=B,source=gateway_af43e,status=ok operating_hours=65i,pressure=1101.2,temperature=29.7 1722443553000000000\n" +
		"device,location=factory_south,source=gateway_af43e status=\"ok\" 1722443559000000000\n" +
		"serializer_test,location=factory_south,machine=C,source=gateway_af43e,status=off operating_hours=0i,pressure=0,temperature=0 1722443562000000000\n"

	// Setup the limited serializer
	s := &influx.Serializer{SortFields: true}
	require.NoError(t, s.Init())
	serializer := NewIndividualSerializer(s)

	// Do the first serialization runs with all metrics
	buf, err := serializer.SerializeBatch(input, math.MaxInt64)
	require.NoError(t, err)
	require.Equal(t, expected, string(buf))
}