
Adding upstream version 1.34.4.

Signed-off-by: Daniel Baumann <daniel@debian.org>
Daniel Baumann 2025-05-24 07:26:29 +02:00
parent e393c3af3f
commit 4978089aab
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
4963 changed files with 677545 additions and 0 deletions

View file

@@ -0,0 +1 @@
package all

View file

@@ -0,0 +1,5 @@
//go:build !custom || aggregators || aggregators.basicstats
package all
import _ "github.com/influxdata/telegraf/plugins/aggregators/basicstats" // register plugin

View file

@@ -0,0 +1,5 @@
//go:build !custom || aggregators || aggregators.derivative
package all
import _ "github.com/influxdata/telegraf/plugins/aggregators/derivative" // register plugin

View file

@@ -0,0 +1,5 @@
//go:build !custom || aggregators || aggregators.final
package all
import _ "github.com/influxdata/telegraf/plugins/aggregators/final" // register plugin

View file

@@ -0,0 +1,5 @@
//go:build !custom || aggregators || aggregators.histogram
package all
import _ "github.com/influxdata/telegraf/plugins/aggregators/histogram" // register plugin

View file

@@ -0,0 +1,5 @@
//go:build !custom || aggregators || aggregators.merge
package all
import _ "github.com/influxdata/telegraf/plugins/aggregators/merge" // register plugin

View file

@@ -0,0 +1,5 @@
//go:build !custom || aggregators || aggregators.minmax
package all
import _ "github.com/influxdata/telegraf/plugins/aggregators/minmax" // register plugin

View file

@@ -0,0 +1,5 @@
//go:build !custom || aggregators || aggregators.quantile
package all
import _ "github.com/influxdata/telegraf/plugins/aggregators/quantile" // register plugin

View file

@@ -0,0 +1,5 @@
//go:build !custom || aggregators || aggregators.starlark
package all
import _ "github.com/influxdata/telegraf/plugins/aggregators/starlark" // register plugin

View file

@@ -0,0 +1,5 @@
//go:build !custom || aggregators || aggregators.valuecounter
package all
import _ "github.com/influxdata/telegraf/plugins/aggregators/valuecounter" // register plugin

View file

@@ -0,0 +1,74 @@
# Basic Statistics Aggregator Plugin
This plugin computes basic statistics such as counts, differences, minima,
maxima, mean values, non-negative differences, etc. for a set of metrics and
emits these statistical values every `period`.
⭐ Telegraf v1.5.0
🏷️ statistics
💻 all
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used to
modify metrics, tags, and fields or create aliases and configure ordering, etc.
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
## Configuration
```toml @sample.conf
# Keep the aggregate basicstats of each metric passing through.
[[aggregators.basicstats]]
## The period on which to flush & clear the aggregator.
# period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
# drop_original = false
## Configures which basic stats to push as fields
# stats = ["count","min","max","mean","variance","stdev"]
```
- stats
- If not specified, then `count`, `min`, `max`, `mean`, `stdev`, and `s2` are
aggregated and pushed as fields. Other stats are not aggregated by default
to maintain backwards compatibility.
- If set to an empty array, no stats are aggregated
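
As an illustration (not part of the upstream docs), a minimal Go sketch that
drives the plugin with an explicit `stats` list, using the same `metric` and
`testutil` helpers as the plugin's own tests:

```go
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/metric"
	"github.com/influxdata/telegraf/plugins/aggregators/basicstats"
	"github.com/influxdata/telegraf/testutil"
)

func main() {
	agg := basicstats.NewBasicStats()
	agg.Stats = []string{"count", "sum"} // explicit list: only these are emitted
	agg.Log = testutil.Logger{}
	if err := agg.Init(); err != nil {
		panic(err)
	}

	m := metric.New("m1", map[string]string{"host": "a"},
		map[string]interface{}{"load": 1.5}, time.Now())
	agg.Add(m)

	acc := testutil.Accumulator{}
	agg.Push(&acc)
	fmt.Println(acc.HasField("m1", "load_count")) // true
	fmt.Println(acc.HasField("m1", "load_sum"))   // true
	fmt.Println(acc.HasField("m1", "load_mean"))  // false: not configured
}
```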
## Measurements & Fields
- measurement1
- field1_count
- field1_diff (difference)
- field1_rate (rate per second)
- field1_max
- field1_min
- field1_mean
- field1_non_negative_diff (non-negative difference)
- field1_non_negative_rate (non-negative rate per second)
- field1_percent_change
- field1_sum
- field1_s2 (variance)
- field1_stdev (standard deviation)
- field1_interval (interval in nanoseconds)
- field1_last (last aggregated value)
- field1_first (first aggregated value)
## Tags
No tags are applied by this aggregator.
## Example Output
```text
system,host=tars load1=1 1475583980000000000
system,host=tars load1=1 1475583990000000000
system,host=tars load1_count=2,load1_diff=0,load1_rate=0,load1_max=1,load1_min=1,load1_mean=1,load1_sum=2,load1_s2=0,load1_stdev=0,load1_interval=10000000000i,load1_last=1 1475584010000000000
system,host=tars load1=1 1475584020000000000
system,host=tars load1=3 1475584030000000000
system,host=tars load1_count=2,load1_diff=2,load1_rate=0.2,load1_max=3,load1_min=1,load1_mean=2,load1_sum=4,load1_s2=2,load1_stdev=1.414162,load1_interval=10000000000i,load1_last=3,load1_first=3 1475584010000000000
```

View file

@@ -0,0 +1,325 @@
//go:generate ../../../tools/readme_config_includer/generator
package basicstats
import (
_ "embed"
"math"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/aggregators"
)
//go:embed sample.conf
var sampleConfig string
type BasicStats struct {
Stats []string `toml:"stats"`
Log telegraf.Logger
cache map[uint64]aggregate
statsConfig *configuredStats
}
type configuredStats struct {
count bool
min bool
max bool
mean bool
variance bool
stdev bool
sum bool
diff bool
nonNegativeDiff bool
rate bool
nonNegativeRate bool
percentChange bool
interval bool
last bool
first bool
}
func NewBasicStats() *BasicStats {
return &BasicStats{
cache: make(map[uint64]aggregate),
}
}
type aggregate struct {
fields map[string]basicstats
name string
tags map[string]string
}
type basicstats struct {
count float64
min float64
max float64
sum float64
mean float64
diff float64
rate float64
interval time.Duration
last float64
first float64
M2 float64 // intermediate value for variance/stdev
PREVIOUS float64 // intermediate value for diff
TIME time.Time // intermediate value for rate
}
func (*BasicStats) SampleConfig() string {
return sampleConfig
}
func (b *BasicStats) Add(in telegraf.Metric) {
id := in.HashID()
if _, ok := b.cache[id]; !ok {
// hit an uncached metric, create caches for first time:
a := aggregate{
name: in.Name(),
tags: in.Tags(),
fields: make(map[string]basicstats),
}
for _, field := range in.FieldList() {
if fv, ok := convert(field.Value); ok {
a.fields[field.Key] = basicstats{
count: 1,
min: fv,
max: fv,
mean: fv,
sum: fv,
diff: 0.0,
rate: 0.0,
last: fv,
first: fv,
M2: 0.0,
PREVIOUS: fv,
TIME: in.Time(),
}
}
}
b.cache[id] = a
} else {
for _, field := range in.FieldList() {
if fv, ok := convert(field.Value); ok {
if _, ok := b.cache[id].fields[field.Key]; !ok {
// hit an uncached field of a cached metric
b.cache[id].fields[field.Key] = basicstats{
count: 1,
min: fv,
max: fv,
mean: fv,
sum: fv,
diff: 0.0,
rate: 0.0,
interval: 0,
last: fv,
first: fv,
M2: 0.0,
PREVIOUS: fv,
TIME: in.Time(),
}
continue
}
tmp := b.cache[id].fields[field.Key]
// https://en.m.wikipedia.org/wiki/Algorithms_for_calculating_variance
// variable initialization
x := fv
mean := tmp.mean
m2 := tmp.M2
// counter compute
n := tmp.count + 1
tmp.count = n
// mean compute
delta := x - mean
mean = mean + delta/n
tmp.mean = mean
// variance/stdev compute
m2 = m2 + delta*(x-mean)
tmp.M2 = m2
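// invariant: the sample variance equals M2 / (count - 1); Push() relies on this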
// max/min compute
if fv < tmp.min {
tmp.min = fv
} else if fv > tmp.max {
tmp.max = fv
}
// sum compute
tmp.sum += fv
// diff compute
tmp.diff = fv - tmp.PREVIOUS
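// PREVIOUS and TIME keep the first value/timestamp of the period (they are
// never updated), so diff, rate and interval all span the whole period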
// interval compute
tmp.interval = in.Time().Sub(tmp.TIME)
// rate compute
if !in.Time().Equal(tmp.TIME) {
tmp.rate = tmp.diff / tmp.interval.Seconds()
}
// last compute
tmp.last = fv
// store final data
b.cache[id].fields[field.Key] = tmp
}
}
}
}
func (b *BasicStats) Push(acc telegraf.Accumulator) {
for _, aggregate := range b.cache {
fields := make(map[string]interface{})
for k, v := range aggregate.fields {
if b.statsConfig.count {
fields[k+"_count"] = v.count
}
if b.statsConfig.min {
fields[k+"_min"] = v.min
}
if b.statsConfig.max {
fields[k+"_max"] = v.max
}
if b.statsConfig.mean {
fields[k+"_mean"] = v.mean
}
if b.statsConfig.sum {
fields[k+"_sum"] = v.sum
}
if b.statsConfig.last {
fields[k+"_last"] = v.last
}
if b.statsConfig.first {
fields[k+"_first"] = v.first
}
// v.count always >=1
if v.count > 1 {
variance := v.M2 / (v.count - 1)
if b.statsConfig.variance {
fields[k+"_s2"] = variance
}
if b.statsConfig.stdev {
fields[k+"_stdev"] = math.Sqrt(variance)
}
if b.statsConfig.diff {
fields[k+"_diff"] = v.diff
}
if b.statsConfig.nonNegativeDiff && v.diff >= 0 {
fields[k+"_non_negative_diff"] = v.diff
}
if b.statsConfig.rate {
fields[k+"_rate"] = v.rate
}
if b.statsConfig.percentChange {
fields[k+"_percent_change"] = v.diff / v.PREVIOUS * 100
}
if b.statsConfig.nonNegativeRate && v.diff >= 0 {
fields[k+"_non_negative_rate"] = v.rate
}
if b.statsConfig.interval {
fields[k+"_interval"] = v.interval.Nanoseconds()
}
}
// count == 1: variance/stdev are undefined and diff/rate need two samples, so these stats are not sent
}
if len(fields) > 0 {
acc.AddFields(aggregate.name, fields, aggregate.tags)
}
}
}
// parseStats maps the configured stat names onto the internal flags, warning about unknown names.
func (b *BasicStats) parseStats() *configuredStats {
parsed := &configuredStats{}
for _, name := range b.Stats {
switch name {
case "count":
parsed.count = true
case "min":
parsed.min = true
case "max":
parsed.max = true
case "mean":
parsed.mean = true
case "s2":
parsed.variance = true
case "stdev":
parsed.stdev = true
case "sum":
parsed.sum = true
case "diff":
parsed.diff = true
case "non_negative_diff":
parsed.nonNegativeDiff = true
case "rate":
parsed.rate = true
case "non_negative_rate":
parsed.nonNegativeRate = true
case "percent_change":
parsed.percentChange = true
case "interval":
parsed.interval = true
case "last":
parsed.last = true
case "first":
parsed.first = true
default:
b.Log.Warnf("Unrecognized basic stat %q, ignoring", name)
}
}
return parsed
}
func (b *BasicStats) initConfiguredStats() {
if b.Stats == nil {
b.statsConfig = &configuredStats{
count: true,
min: true,
max: true,
mean: true,
variance: true,
stdev: true,
sum: false,
diff: false,
nonNegativeDiff: false,
rate: false,
nonNegativeRate: false,
percentChange: false,
interval: false,
last: false,
first: false,
}
} else {
b.statsConfig = b.parseStats()
}
}
func (b *BasicStats) Reset() {
b.cache = make(map[uint64]aggregate)
}
func convert(in interface{}) (float64, bool) {
switch v := in.(type) {
case float64:
return v, true
case int64:
return float64(v), true
case uint64:
return float64(v), true
default:
return 0, false
}
}
func (b *BasicStats) Init() error {
b.initConfiguredStats()
return nil
}
func init() {
aggregators.Add("basicstats", func() telegraf.Aggregator {
return NewBasicStats()
})
}

View file

@@ -0,0 +1,826 @@
package basicstats
import (
"math"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/testutil"
)
var m1 = metric.New("m1",
map[string]string{"foo": "bar"},
map[string]interface{}{
"a": int64(1),
"b": int64(1),
"c": float64(2),
"d": float64(2),
"g": int64(3),
},
time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC),
)
var m2 = metric.New("m1",
map[string]string{"foo": "bar"},
map[string]interface{}{
"a": int64(1),
"b": int64(3),
"c": float64(4),
"d": float64(6),
"e": float64(200),
"f": uint64(200),
"ignoreme": "string",
"andme": true,
"g": int64(1),
},
time.Date(2000, 1, 1, 0, 0, 0, 1e6, time.UTC),
)
func BenchmarkApply(b *testing.B) {
minmax := NewBasicStats()
minmax.Log = testutil.Logger{}
minmax.initConfiguredStats()
for n := 0; n < b.N; n++ {
minmax.Add(m1)
minmax.Add(m2)
}
}
// Test two metrics getting added.
func TestBasicStatsWithPeriod(t *testing.T) {
acc := testutil.Accumulator{}
minmax := NewBasicStats()
minmax.Log = testutil.Logger{}
minmax.initConfiguredStats()
minmax.Add(m1)
minmax.Add(m2)
minmax.Push(&acc)
expectedFields := map[string]interface{}{
"a_count": float64(2), // a
"a_max": float64(1),
"a_min": float64(1),
"a_mean": float64(1),
"a_stdev": float64(0),
"a_s2": float64(0),
"b_count": float64(2), // b
"b_max": float64(3),
"b_min": float64(1),
"b_mean": float64(2),
"b_s2": float64(2),
"b_stdev": math.Sqrt(2),
"c_count": float64(2), // c
"c_max": float64(4),
"c_min": float64(2),
"c_mean": float64(3),
"c_s2": float64(2),
"c_stdev": math.Sqrt(2),
"d_count": float64(2), // d
"d_max": float64(6),
"d_min": float64(2),
"d_mean": float64(4),
"d_s2": float64(8),
"d_stdev": math.Sqrt(8),
"e_count": float64(1), // e
"e_max": float64(200),
"e_min": float64(200),
"e_mean": float64(200),
"f_count": float64(1), // f
"f_max": float64(200),
"f_min": float64(200),
"f_mean": float64(200),
"g_count": float64(2), // g
"g_max": float64(3),
"g_min": float64(1),
"g_mean": float64(2),
"g_s2": float64(2),
"g_stdev": math.Sqrt(2),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test two metrics getting added with a push/reset in between (simulates
// getting added in different periods.)
func TestBasicStatsDifferentPeriods(t *testing.T) {
acc := testutil.Accumulator{}
minmax := NewBasicStats()
minmax.Stats = []string{"count", "max", "min", "mean", "last", "first"}
minmax.Log = testutil.Logger{}
minmax.initConfiguredStats()
minmax.Add(m1)
minmax.Push(&acc)
expectedFields := map[string]interface{}{
"a_count": float64(1), // a
"a_max": float64(1),
"a_min": float64(1),
"a_mean": float64(1),
"a_last": float64(1),
"a_first": float64(1),
"b_count": float64(1), // b
"b_max": float64(1),
"b_min": float64(1),
"b_mean": float64(1),
"b_last": float64(1),
"b_first": float64(1),
"c_count": float64(1), // c
"c_max": float64(2),
"c_min": float64(2),
"c_mean": float64(2),
"c_last": float64(2),
"c_first": float64(2),
"d_count": float64(1), // d
"d_max": float64(2),
"d_min": float64(2),
"d_mean": float64(2),
"d_last": float64(2),
"d_first": float64(2),
"g_count": float64(1), // g
"g_max": float64(3),
"g_min": float64(3),
"g_mean": float64(3),
"g_last": float64(3),
"g_first": float64(3),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
acc.ClearMetrics()
minmax.Reset()
minmax.Add(m2)
minmax.Push(&acc)
expectedFields = map[string]interface{}{
"a_count": float64(1), // a
"a_max": float64(1),
"a_min": float64(1),
"a_mean": float64(1),
"a_last": float64(1),
"a_first": float64(1),
"b_count": float64(1), // b
"b_max": float64(3),
"b_min": float64(3),
"b_mean": float64(3),
"b_last": float64(3),
"b_first": float64(3),
"c_count": float64(1), // c
"c_max": float64(4),
"c_min": float64(4),
"c_mean": float64(4),
"c_last": float64(4),
"c_first": float64(4),
"d_count": float64(1), // d
"d_max": float64(6),
"d_min": float64(6),
"d_mean": float64(6),
"d_last": float64(6),
"d_first": float64(6),
"e_count": float64(1), // e
"e_max": float64(200),
"e_min": float64(200),
"e_mean": float64(200),
"e_last": float64(200),
"e_first": float64(200),
"f_count": float64(1), // f
"f_max": float64(200),
"f_min": float64(200),
"f_mean": float64(200),
"f_last": float64(200),
"f_first": float64(200),
"g_count": float64(1), // g
"g_max": float64(1),
"g_min": float64(1),
"g_mean": float64(1),
"g_last": float64(1),
"g_first": float64(1),
}
expectedTags = map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test only aggregating count
func TestBasicStatsWithOnlyCount(t *testing.T) {
aggregator := NewBasicStats()
aggregator.Stats = []string{"count"}
aggregator.Log = testutil.Logger{}
aggregator.initConfiguredStats()
aggregator.Add(m1)
aggregator.Add(m2)
acc := testutil.Accumulator{}
aggregator.Push(&acc)
expectedFields := map[string]interface{}{
"a_count": float64(2),
"b_count": float64(2),
"c_count": float64(2),
"d_count": float64(2),
"e_count": float64(1),
"f_count": float64(1),
"g_count": float64(2),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test only aggregating minimum
func TestBasicStatsWithOnlyMin(t *testing.T) {
aggregator := NewBasicStats()
aggregator.Stats = []string{"min"}
aggregator.Log = testutil.Logger{}
aggregator.initConfiguredStats()
aggregator.Add(m1)
aggregator.Add(m2)
acc := testutil.Accumulator{}
aggregator.Push(&acc)
expectedFields := map[string]interface{}{
"a_min": float64(1),
"b_min": float64(1),
"c_min": float64(2),
"d_min": float64(2),
"e_min": float64(200),
"f_min": float64(200),
"g_min": float64(1),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test only aggregating maximum
func TestBasicStatsWithOnlyMax(t *testing.T) {
aggregator := NewBasicStats()
aggregator.Stats = []string{"max"}
aggregator.Log = testutil.Logger{}
aggregator.initConfiguredStats()
aggregator.Add(m1)
aggregator.Add(m2)
acc := testutil.Accumulator{}
aggregator.Push(&acc)
expectedFields := map[string]interface{}{
"a_max": float64(1),
"b_max": float64(3),
"c_max": float64(4),
"d_max": float64(6),
"e_max": float64(200),
"f_max": float64(200),
"g_max": float64(3),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test only aggregating mean
func TestBasicStatsWithOnlyMean(t *testing.T) {
aggregator := NewBasicStats()
aggregator.Stats = []string{"mean"}
aggregator.Log = testutil.Logger{}
aggregator.initConfiguredStats()
aggregator.Add(m1)
aggregator.Add(m2)
acc := testutil.Accumulator{}
aggregator.Push(&acc)
expectedFields := map[string]interface{}{
"a_mean": float64(1),
"b_mean": float64(2),
"c_mean": float64(3),
"d_mean": float64(4),
"e_mean": float64(200),
"f_mean": float64(200),
"g_mean": float64(2),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test only aggregating sum
func TestBasicStatsWithOnlySum(t *testing.T) {
aggregator := NewBasicStats()
aggregator.Stats = []string{"sum"}
aggregator.Log = testutil.Logger{}
aggregator.initConfiguredStats()
aggregator.Add(m1)
aggregator.Add(m2)
acc := testutil.Accumulator{}
aggregator.Push(&acc)
expectedFields := map[string]interface{}{
"a_sum": float64(2),
"b_sum": float64(4),
"c_sum": float64(6),
"d_sum": float64(8),
"e_sum": float64(200),
"f_sum": float64(200),
"g_sum": float64(4),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Verify that sum doesn't suffer from floating point errors. Early
// implementations of sum were calculated from mean and count, which
// e.g. summed "1, 1, 5, 1" as "7.999999..." instead of 8.
func TestBasicStatsWithOnlySumFloatingPointErrata(t *testing.T) {
var sum1 = metric.New("m1",
map[string]string{},
map[string]interface{}{
"a": int64(1),
},
time.Now(),
)
var sum2 = metric.New("m1",
map[string]string{},
map[string]interface{}{
"a": int64(1),
},
time.Now(),
)
var sum3 = metric.New("m1",
map[string]string{},
map[string]interface{}{
"a": int64(5),
},
time.Now(),
)
var sum4 = metric.New("m1",
map[string]string{},
map[string]interface{}{
"a": int64(1),
},
time.Now(),
)
aggregator := NewBasicStats()
aggregator.Stats = []string{"sum"}
aggregator.Log = testutil.Logger{}
aggregator.initConfiguredStats()
aggregator.Add(sum1)
aggregator.Add(sum2)
aggregator.Add(sum3)
aggregator.Add(sum4)
acc := testutil.Accumulator{}
aggregator.Push(&acc)
expectedFields := map[string]interface{}{
"a_sum": float64(8),
}
expectedTags := map[string]string{}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test only aggregating variance
func TestBasicStatsWithOnlyVariance(t *testing.T) {
aggregator := NewBasicStats()
aggregator.Stats = []string{"s2"}
aggregator.Log = testutil.Logger{}
aggregator.initConfiguredStats()
aggregator.Add(m1)
aggregator.Add(m2)
acc := testutil.Accumulator{}
aggregator.Push(&acc)
expectedFields := map[string]interface{}{
"a_s2": float64(0),
"b_s2": float64(2),
"c_s2": float64(2),
"d_s2": float64(8),
"g_s2": float64(2),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test only aggregating standard deviation
func TestBasicStatsWithOnlyStandardDeviation(t *testing.T) {
aggregator := NewBasicStats()
aggregator.Stats = []string{"stdev"}
aggregator.Log = testutil.Logger{}
aggregator.initConfiguredStats()
aggregator.Add(m1)
aggregator.Add(m2)
acc := testutil.Accumulator{}
aggregator.Push(&acc)
expectedFields := map[string]interface{}{
"a_stdev": float64(0),
"b_stdev": math.Sqrt(2),
"c_stdev": math.Sqrt(2),
"d_stdev": math.Sqrt(8),
"g_stdev": math.Sqrt(2),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test only aggregating minimum and maximum
func TestBasicStatsWithMinAndMax(t *testing.T) {
aggregator := NewBasicStats()
aggregator.Stats = []string{"min", "max"}
aggregator.Log = testutil.Logger{}
aggregator.initConfiguredStats()
aggregator.Add(m1)
aggregator.Add(m2)
acc := testutil.Accumulator{}
aggregator.Push(&acc)
expectedFields := map[string]interface{}{
"a_max": float64(1), // a
"a_min": float64(1),
"b_max": float64(3), // b
"b_min": float64(1),
"c_max": float64(4), // c
"c_min": float64(2),
"d_max": float64(6), // d
"d_min": float64(2),
"e_max": float64(200), // e
"e_min": float64(200),
"f_max": float64(200), // f
"f_min": float64(200),
"g_max": float64(3), // g
"g_min": float64(1),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test only aggregating diff
func TestBasicStatsWithDiff(t *testing.T) {
aggregator := NewBasicStats()
aggregator.Stats = []string{"diff"}
aggregator.Log = testutil.Logger{}
aggregator.initConfiguredStats()
aggregator.Add(m1)
aggregator.Add(m2)
acc := testutil.Accumulator{}
aggregator.Push(&acc)
expectedFields := map[string]interface{}{
"a_diff": float64(0),
"b_diff": float64(2),
"c_diff": float64(2),
"d_diff": float64(4),
"g_diff": float64(-2),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
func TestBasicStatsWithRate(t *testing.T) {
aggregator := NewBasicStats()
aggregator.Stats = []string{"rate"}
aggregator.Log = testutil.Logger{}
aggregator.initConfiguredStats()
aggregator.Add(m1)
aggregator.Add(m2)
acc := testutil.Accumulator{}
aggregator.Push(&acc)
expectedFields := map[string]interface{}{
"a_rate": float64(0),
"b_rate": float64(2000),
"c_rate": float64(2000),
"d_rate": float64(4000),
"g_rate": float64(-2000),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
func TestBasicStatsWithNonNegativeRate(t *testing.T) {
aggregator := NewBasicStats()
aggregator.Stats = []string{"non_negative_rate"}
aggregator.Log = testutil.Logger{}
aggregator.initConfiguredStats()
aggregator.Add(m1)
aggregator.Add(m2)
acc := testutil.Accumulator{}
aggregator.Push(&acc)
expectedFields := map[string]interface{}{
"a_non_negative_rate": float64(0),
"b_non_negative_rate": float64(2000),
"c_non_negative_rate": float64(2000),
"d_non_negative_rate": float64(4000),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
func TestBasicStatsWithPctChange(t *testing.T) {
aggregator := NewBasicStats()
aggregator.Stats = []string{"percent_change"}
aggregator.Log = testutil.Logger{}
aggregator.initConfiguredStats()
aggregator.Add(m1)
aggregator.Add(m2)
acc := testutil.Accumulator{}
aggregator.Push(&acc)
expectedFields := map[string]interface{}{
"a_percent_change": float64(0),
"b_percent_change": float64(200),
"c_percent_change": float64(100),
"d_percent_change": float64(200),
"g_percent_change": float64(-66.66666666666666),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
func TestBasicStatsWithInterval(t *testing.T) {
aggregator := NewBasicStats()
aggregator.Stats = []string{"interval"}
aggregator.Log = testutil.Logger{}
aggregator.initConfiguredStats()
aggregator.Add(m1)
aggregator.Add(m2)
acc := testutil.Accumulator{}
aggregator.Push(&acc)
expectedFields := map[string]interface{}{
"a_interval": int64(time.Millisecond),
"b_interval": int64(time.Millisecond),
"c_interval": int64(time.Millisecond),
"d_interval": int64(time.Millisecond),
"g_interval": int64(time.Millisecond),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test only aggregating non_negative_diff
func TestBasicStatsWithNonNegativeDiff(t *testing.T) {
aggregator := NewBasicStats()
aggregator.Stats = []string{"non_negative_diff"}
aggregator.Log = testutil.Logger{}
aggregator.initConfiguredStats()
aggregator.Add(m1)
aggregator.Add(m2)
acc := testutil.Accumulator{}
aggregator.Push(&acc)
expectedFields := map[string]interface{}{
"a_non_negative_diff": float64(0),
"b_non_negative_diff": float64(2),
"c_non_negative_diff": float64(2),
"d_non_negative_diff": float64(4),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test aggregating with all stats
func TestBasicStatsWithAllStats(t *testing.T) {
acc := testutil.Accumulator{}
minmax := NewBasicStats()
minmax.Log = testutil.Logger{}
minmax.Stats = []string{"count", "min", "max", "mean", "stdev", "s2", "sum", "last", "first"}
minmax.initConfiguredStats()
minmax.Add(m1)
minmax.Add(m2)
minmax.Push(&acc)
expectedFields := map[string]interface{}{
"a_count": float64(2), // a
"a_max": float64(1),
"a_min": float64(1),
"a_mean": float64(1),
"a_stdev": float64(0),
"a_s2": float64(0),
"a_sum": float64(2),
"a_last": float64(1),
"a_first": float64(1),
"b_count": float64(2), // b
"b_max": float64(3),
"b_min": float64(1),
"b_mean": float64(2),
"b_s2": float64(2),
"b_sum": float64(4),
"b_last": float64(3),
"b_stdev": math.Sqrt(2),
"b_first": float64(1),
"c_count": float64(2), // c
"c_max": float64(4),
"c_min": float64(2),
"c_mean": float64(3),
"c_s2": float64(2),
"c_stdev": math.Sqrt(2),
"c_sum": float64(6),
"c_last": float64(4),
"c_first": float64(2),
"d_count": float64(2), // d
"d_max": float64(6),
"d_min": float64(2),
"d_mean": float64(4),
"d_s2": float64(8),
"d_stdev": math.Sqrt(8),
"d_sum": float64(8),
"d_last": float64(6),
"d_first": float64(2),
"e_count": float64(1), // e
"e_max": float64(200),
"e_min": float64(200),
"e_mean": float64(200),
"e_sum": float64(200),
"e_last": float64(200),
"e_first": float64(200),
"f_count": float64(1), // f
"f_max": float64(200),
"f_min": float64(200),
"f_mean": float64(200),
"f_sum": float64(200),
"f_last": float64(200),
"f_first": float64(200),
"g_count": float64(2), // g
"g_max": float64(3),
"g_min": float64(1),
"g_mean": float64(2),
"g_s2": float64(2),
"g_stdev": math.Sqrt(2),
"g_sum": float64(4),
"g_last": float64(1),
"g_first": float64(3),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test that if an empty array is passed, no points are pushed
func TestBasicStatsWithNoStats(t *testing.T) {
aggregator := NewBasicStats()
aggregator.Stats = make([]string, 0)
aggregator.Log = testutil.Logger{}
aggregator.initConfiguredStats()
aggregator.Add(m1)
aggregator.Add(m2)
acc := testutil.Accumulator{}
aggregator.Push(&acc)
acc.AssertDoesNotContainMeasurement(t, "m1")
}
// Test that if an unknown stat is configured, it doesn't explode
func TestBasicStatsWithUnknownStat(t *testing.T) {
aggregator := NewBasicStats()
aggregator.Stats = []string{"crazy"}
aggregator.Log = testutil.Logger{}
aggregator.initConfiguredStats()
aggregator.Add(m1)
aggregator.Add(m2)
acc := testutil.Accumulator{}
aggregator.Push(&acc)
acc.AssertDoesNotContainMeasurement(t, "m1")
}
// Test that if Stats isn't supplied, then we only do count, min, max, mean,
// stdev, and s2. We purposely exclude sum for backwards compatibility,
otherwise users' working systems will suddenly (and surprisingly) start
// capturing sum without their input.
func TestBasicStatsWithDefaultStats(t *testing.T) {
aggregator := NewBasicStats()
aggregator.Log = testutil.Logger{}
aggregator.initConfiguredStats()
aggregator.Add(m1)
aggregator.Add(m2)
acc := testutil.Accumulator{}
aggregator.Push(&acc)
require.True(t, acc.HasField("m1", "a_count"))
require.True(t, acc.HasField("m1", "a_min"))
require.True(t, acc.HasField("m1", "a_max"))
require.True(t, acc.HasField("m1", "a_mean"))
require.True(t, acc.HasField("m1", "a_stdev"))
require.True(t, acc.HasField("m1", "a_s2"))
require.False(t, acc.HasField("m1", "a_sum"))
}
func TestBasicStatsWithOnlyLast(t *testing.T) {
aggregator := NewBasicStats()
aggregator.Stats = []string{"last"}
aggregator.Log = testutil.Logger{}
aggregator.initConfiguredStats()
aggregator.Add(m1)
aggregator.Add(m2)
acc := testutil.Accumulator{}
aggregator.Push(&acc)
expectedFields := map[string]interface{}{
"a_last": float64(1),
"b_last": float64(3),
"c_last": float64(4),
"d_last": float64(6),
"e_last": float64(200),
"f_last": float64(200),
"g_last": float64(1),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
func TestBasicStatsWithOnlyFirst(t *testing.T) {
aggregator := NewBasicStats()
aggregator.Stats = []string{"first"}
aggregator.Log = testutil.Logger{}
aggregator.initConfiguredStats()
aggregator.Add(m1)
aggregator.Add(m2)
acc := testutil.Accumulator{}
aggregator.Push(&acc)
expectedFields := map[string]interface{}{
"a_first": float64(1),
"b_first": float64(1),
"c_first": float64(2),
"d_first": float64(2),
"e_first": float64(200),
"f_first": float64(200),
"g_first": float64(3),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}

View file

@@ -0,0 +1,11 @@
# Keep the aggregate basicstats of each metric passing through.
[[aggregators.basicstats]]
## The period on which to flush & clear the aggregator.
# period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
# drop_original = false
## Configures which basic stats to push as fields
# stats = ["count","min","max","mean","variance","stdev"]

View file

@@ -0,0 +1,6 @@
package aggregators
import "github.com/influxdata/telegraf"
// Deprecations lists the deprecated plugins
var Deprecations = make(map[string]telegraf.DeprecationInfo)

View file

@@ -0,0 +1,228 @@
# Derivative Aggregator Plugin
This plugin computes the derivative for all fields of the aggregated metrics.
⭐ Telegraf v1.18.0
🏷️ math
💻 all
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used to
modify metrics, tags, and fields or create aliases and configure ordering, etc.
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
## Configuration
```toml @sample.conf
# Calculates a derivative for every field.
[[aggregators.derivative]]
## The period in which to flush the aggregator.
# period = "30s"
## Suffix to append for the resulting derivative field.
# suffix = "_rate"
## Field to use as the divisor (denominator) when computing the derivative.
## When using a field as the derivation parameter the name of that field will
## be used for the resulting derivative, e.g. *fieldname_by_parameter*.
## By default the timestamps of the metrics are used and the suffix is omitted.
# variable = ""
## Maximum number of roll-overs in case only one measurement is found during a period.
# max_roll_over = 10
```
This aggregator estimates a derivative for each field of a metric that is
contained in both the first and last metric of the aggregation interval.
Without further configuration the derivative is calculated with respect to
the time difference between these two measurements in seconds.
The following formula is applied for every field:
```text
derivative = (value_last - value_first) / (time_last - time_first)
```
The resulting derivative will be named `<fieldname>_rate` if no `suffix` is
configured.
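As a quick sanity check, here is the formula applied to the `bytes_sent`
series from the example output at the end of this README (a standalone
sketch, not plugin code):

```go
package main

import "fmt"

func main() {
	// bytes_sent goes from 16649 to 87328 over a 20 s aggregation window
	// (see the example output at the end of this README).
	first, last := 16649.0, 87328.0
	elapsed := 20.0 // time_last - time_first, in seconds
	fmt.Println((last - first) / elapsed) // 3533.95 => bytes_sent_rate
}
```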
To calculate a derivative for every field use
```toml
[[aggregators.derivative]]
## Specific Derivative Aggregator Arguments:
## Configure a custom derivation variable. Timestamp is used if none is given.
# variable = ""
## Suffix to add to the field name for the derivative name.
# suffix = "_rate"
## Roll-Over last measurement to first measurement of next period
# max_roll_over = 10
## General Aggregator Arguments:
## calculate derivative every 30 seconds
period = "30s"
```
## Time Derivatives
In its default configuration it determines the first and last measurement of
the period. From these measurements the time difference in seconds is
calculated. This time difference is then used to divide the difference of each
field using the following formula:
```text
derivative = (value_last - value_first) / (time_last - time_first)
```
For each field the derivative is emitted with a naming pattern
`<fieldname>_rate`.
## Custom Derivation Variable
The plugin supports using a field of the aggregated measurements as the
derivation variable in the denominator. This variable is assumed to be
monotonically increasing. In that case the following formula is used:
```text
derivative = (value_last - value_first) / (variable_last - variable_first)
```
**Make sure the specified variable is not filtered and exists in the metrics
passed to this aggregator!**
When using a custom derivation variable, you should change the `suffix` of the
derivative name. See the next section on [customizing the derivative
name](#customize-the-derivative-name) for details.
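
Applied to the example output at the end of this README with
`variable = "packets_recv"`, the numbers work out as in this standalone
sketch (not plugin code):

```go
package main

import "fmt"

func main() {
	// bytes_recv: 15409 -> 73987 and packets_recv: 164 -> 364 over one period
	// (see the example output at the end of this README).
	bytesFirst, bytesLast := 15409.0, 73987.0
	pktsFirst, pktsLast := 164.0, 364.0
	fmt.Println((bytesLast - bytesFirst) / (pktsLast - pktsFirst)) // 292.89 => bytes_recv_by_packets_recv
}
```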
## Customize the Derivative Name
The derivatives generated by the aggregator are named `<fieldname>_rate`,
i.e. they are composed of the field name and a suffix `_rate`. You can
configure the suffix to be used by changing the `suffix` parameter.
## Roll-Over to next Period
Calculating the derivative for a period requires at least two distinct
measurements during that period. Whether those are available depends on the
configuration of the aggregator `period` and the agent `interval`. By default
the last measurement is used as first measurement in the next aggregation
period. This enables a continuous calculation of the derivative. If within the
next period an earlier timestamp is encountered this measurement will replace
the roll-over metric. A main benefit of this roll-over is the ability to cope
with multiple "quiet" periods, where no new measurement is pushed to the
aggregator. The roll-over will take place at most `max_roll_over` times.
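
A minimal sketch of this roll-over bookkeeping (the authoritative version is
the plugin's `Reset` method later in this commit; `event` and `aggregate` are
stripped-down stand-ins here):

```go
package main

import "fmt"

type event struct{ value float64 }

type aggregate struct {
	first, last *event
	rollOver    uint
}

// reset mimics the per-period cleanup: the last event of a period seeds the
// next period at most maxRollOver times before the series is dropped.
func reset(cache map[uint64]*aggregate, maxRollOver uint) {
	for id, agg := range cache {
		if agg.rollOver < maxRollOver {
			agg.first = agg.last // last event becomes next period's first
			agg.rollOver++
		} else {
			delete(cache, id) // too many quiet periods: drop the series
		}
	}
}

func main() {
	e := &event{value: 42}
	cache := map[uint64]*aggregate{1: {first: e, last: e}}
	for i := 0; i < 3; i++ {
		reset(cache, 2)
		fmt.Println(len(cache)) // 1, 1, 0: dropped after two roll-overs
	}
}
```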
### Example of Roll-Over
Let us assume we have an input plugin that generates a measurement with a
single metric "test" every 2 seconds. Let this metric increase during the first
10 seconds from 0.0 to 10.0 and then decrease during the next 10 seconds from
10.0 to 0.0:
| timestamp | value |
|-----------|-------|
| 0 | 0.0 |
| 2 | 2.0 |
| 4 | 4.0 |
| 6 | 6.0 |
| 8 | 8.0 |
| 10 | 10.0 |
| 12 | 8.0 |
| 14 | 6.0 |
| 16 | 4.0 |
| 18 | 2.0 |
| 20 | 0.0 |
To avoid thinking about border values, we consider periods to be inclusive at
the start but exclusive at the end. Using `period = "10s"` and
`max_roll_over = 0` we would get the following aggregates:
| timestamp | value | aggregate | explanation |
|-----------|-------|-----------|--------------|
| 0 | 0.0 | | |
| 2 | 2.0 | | |
| 4 | 4.0 | | |
| 6 | 6.0 | | |
| 8 | 8.0 | | |
||| 1.0 | (8.0 - 0.0) / (8 - 0) |
| 10 | 10.0 | | |
| 12 | 8.0 | | |
| 14 | 6.0 | | |
| 16 | 4.0 | | |
| 18 | 2.0 | | |
||| -1.0 | (2.0 - 10.0) / (18 - 10) |
| 20 | 0.0 | | |
If we now decrease the period with `period = "2s"`, no derivative could be
calculated since there would be only one measurement in each period. The
aggregator would emit the log message `Same first and last event for "test",
skipping.`. This changes if we use `max_roll_over = 1`, since now the end
measurement of a period is taken as the start of the next period.
| timestamp | value | aggregate | explanation |
|-----------|-------|-----------|-------------|
| 0 | 0.0 | | |
| 2 | 2.0 | 1.0 | (2.0 - 0.0) / (2 - 0) |
| 4 | 4.0 | 1.0 | (4.0 - 2.0) / (4 - 2) |
| 6 | 6.0 | 1.0 | (6.0 - 4.0) / (6 - 4) |
| 8 | 8.0 | 1.0 | (8.0 - 6.0) / (8 - 6) |
| 10 | 10.0 | 1.0 | (10.0 - 8.0) / (10 - 8) |
| 12 | 8.0 | -1.0 | (8.0 - 10.0) / (12 - 10) |
| 14 | 6.0 | -1.0 | (6.0 - 8.0) / (14 - 12) |
| 16 | 4.0 | -1.0 | (4.0 - 6.0) / (16 - 14) |
| 18 | 2.0 | -1.0 | (2.0 - 4.0) / (18 - 16) |
| 20 | 0.0 | -1.0 | (0.0 - 2.0) / (20 - 18) |
The default `max_roll_over = 10` allows for multiple periods without
measurements either due to configuration or missing input.
There may be a slight difference in the calculation when using `max_roll_over`
compared to running without. To illustrate this, let us compare the derivatives
for `period = "7s"`.
| timestamp | value | `max_roll_over = 0` | explanation | `max_roll_over = 1` | explanation |
|-----------|-------|---------------------|-------------|---------------------|-------------|
| 0 | 0.0 | | | | |
| 2 | 2.0 | | | | |
| 4 | 4.0 | | | | |
| 6 | 6.0 | | | | |
| 7 | | 0.8571... | (6-0) / (7-0) | 0.8571... | (6-0) / (7-0) |
| 8 | 8.0 | | | | |
| 10 | 10.0 | | | | |
| 12 | 8.0 | | | | |
| 14 | 8.0 | 0.0 | (8-8) / (14-7) | 0.2857... | (8-6) / (14-7) |
| 16 | 4.0 | | | | |
| 18 | 2.0 | | | | |
| 20 | 0.0 | | | | |
||| -1.0 | | -1.0 | |
The difference stems from the change of the value between periods, e.g. from 6.0
to 8.0 between the first and second period. Those changes are omitted with
`max_roll_over = 0` but are respected with `max_roll_over = 1`. That there are
no further differences in the calculated derivatives is due to the example data,
which has constant derivatives within the first and last period, even when
including the gap between the periods. Using `max_roll_over` with a value
greater than 0 may be important if you need to detect changes between periods,
e.g. when you have very few measurements in a period or quasi-constant metrics
with only occasional changes.
### Tags
No tags are applied by this aggregator.
Existing tags are passed through the aggregator untouched.
## Example Output
```text
net bytes_recv=15409i,packets_recv=164i,bytes_sent=16649i,packets_sent=120i 1508843640000000000
net bytes_recv=73987i,packets_recv=364i,bytes_sent=87328i,packets_sent=452i 1508843660000000000
net bytes_recv_by_packets_recv=292.89 1508843660000000000
net packets_sent_rate=16.6,bytes_sent_rate=3533.95 1508843660000000000
net bytes_sent_by_packet=292.89 1508843660000000000
```

View file

@@ -0,0 +1,183 @@
//go:generate ../../../tools/readme_config_includer/generator
package derivative
import (
_ "embed"
"strings"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/aggregators"
)
//go:embed sample.conf
var sampleConfig string
type Derivative struct {
Variable string `toml:"variable"`
Suffix string `toml:"suffix"`
MaxRollOver uint `toml:"max_roll_over"`
Log telegraf.Logger `toml:"-"`
cache map[uint64]*aggregate
}
type aggregate struct {
first *event
last *event
name string
tags map[string]string
rollOver uint
}
type event struct {
fields map[string]float64
time time.Time
}
const defaultSuffix = "_rate"
func NewDerivative() *Derivative {
derivative := &Derivative{Suffix: defaultSuffix, MaxRollOver: 10}
derivative.cache = make(map[uint64]*aggregate)
derivative.Reset()
return derivative
}
func (*Derivative) SampleConfig() string {
return sampleConfig
}
func (d *Derivative) Add(in telegraf.Metric) {
id := in.HashID()
current, ok := d.cache[id]
if !ok {
// hit an uncached metric, create caches for first time:
d.cache[id] = newAggregate(in)
return
}
if current.first.time.After(in.Time()) {
current.first = newEvent(in)
current.rollOver = 0
} else if current.first.time.Equal(in.Time()) {
upsertConvertedFields(in.Fields(), current.first.fields)
current.rollOver = 0
}
if current.last.time.Before(in.Time()) {
current.last = newEvent(in)
current.rollOver = 0
} else if current.last.time.Equal(in.Time()) {
upsertConvertedFields(in.Fields(), current.last.fields)
current.rollOver = 0
}
}
func newAggregate(in telegraf.Metric) *aggregate {
event := newEvent(in)
return &aggregate{
name: in.Name(),
tags: in.Tags(),
first: event,
last: event,
rollOver: 0,
}
}
func newEvent(in telegraf.Metric) *event {
return &event{
fields: extractConvertedFields(in),
time: in.Time(),
}
}
func extractConvertedFields(in telegraf.Metric) map[string]float64 {
fields := make(map[string]float64, len(in.Fields()))
upsertConvertedFields(in.Fields(), fields)
return fields
}
func upsertConvertedFields(source map[string]interface{}, target map[string]float64) {
for k, v := range source {
if value, ok := convert(v); ok {
target[k] = value
}
}
}
func convert(in interface{}) (float64, bool) {
switch v := in.(type) {
case float64:
return v, true
case int64:
return float64(v), true
case uint64:
return float64(v), true
}
return 0, false
}
func (d *Derivative) Push(acc telegraf.Accumulator) {
for _, aggregate := range d.cache {
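// first == last (pointer equality) means only one distinct event was seen
// in this period, e.g. after a roll-over without new metrics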
if aggregate.first == aggregate.last {
d.Log.Debugf("Same first and last event for %q, skipping.", aggregate.name)
continue
}
var denominator float64
denominator = aggregate.last.time.Sub(aggregate.first.time).Seconds()
if len(d.Variable) > 0 {
var first float64
var last float64
var found bool
if first, found = aggregate.first.fields[d.Variable]; !found {
d.Log.Debugf("Did not find %q in first event for %q.", d.Variable, aggregate.name)
continue
}
if last, found = aggregate.last.fields[d.Variable]; !found {
d.Log.Debugf("Did not find %q in last event for %q.", d.Variable, aggregate.name)
continue
}
denominator = last - first
}
if denominator == 0 {
d.Log.Debugf("Got difference 0 in denominator for %q, skipping.", aggregate.name)
continue
}
derivatives := make(map[string]interface{})
for key, start := range aggregate.first.fields {
if key == d.Variable {
// Skip derivation variable
continue
}
if end, ok := aggregate.last.fields[key]; ok {
d.Log.Debugf("Adding derivative %q to %q.", key+d.Suffix, aggregate.name)
derivatives[key+d.Suffix] = (end - start) / denominator
}
}
acc.AddFields(aggregate.name, derivatives, aggregate.tags)
}
}
func (d *Derivative) Reset() {
for id, aggregate := range d.cache {
if aggregate.rollOver < d.MaxRollOver {
aggregate.first = aggregate.last
aggregate.rollOver = aggregate.rollOver + 1
d.cache[id] = aggregate
d.Log.Debugf("Roll-Over %q for the %d time.", aggregate.name, aggregate.rollOver)
} else {
delete(d.cache, id)
d.Log.Debugf("Removed %q from cache.", aggregate.name)
}
}
}
func (d *Derivative) Init() error {
d.Suffix = strings.TrimSpace(d.Suffix)
d.Variable = strings.TrimSpace(d.Variable)
return nil
}
func init() {
aggregators.Add("derivative", func() telegraf.Aggregator {
return NewDerivative()
})
}

View file

@@ -0,0 +1,418 @@
package derivative
import (
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/testutil"
)
var start = metric.New("TestMetric",
map[string]string{"state": "full"},
map[string]interface{}{
"increasing": int64(0),
"decreasing": int64(100),
"unchanged": int64(42),
"ignored": "strings are not supported",
"parameter": float64(0.0),
},
time.Now(),
)
var finish = metric.New("TestMetric",
map[string]string{"state": "full"},
map[string]interface{}{
"increasing": int64(1000),
"decreasing": int64(0),
"unchanged": int64(42),
"ignored": "strings are not supported",
"parameter": float64(10.0),
},
time.Now().Add(time.Second),
)
func TestTwoFullEventsWithParameter(t *testing.T) {
acc := testutil.Accumulator{}
derivative := &Derivative{
Variable: "parameter",
Suffix: "_by_parameter",
cache: make(map[uint64]*aggregate),
}
derivative.Log = testutil.Logger{}
err := derivative.Init()
require.NoError(t, err)
derivative.Add(start)
derivative.Add(finish)
derivative.Push(&acc)
expectedFields := map[string]interface{}{
"increasing_by_parameter": 100.0,
"decreasing_by_parameter": -10.0,
"unchanged_by_parameter": 0.0,
}
expectedTags := map[string]string{
"state": "full",
}
acc.AssertContainsTaggedFields(t, "TestMetric", expectedFields, expectedTags)
}
func TestTwoFullEventsWithParameterReverseSequence(t *testing.T) {
acc := testutil.Accumulator{}
derivative := &Derivative{
Variable: "parameter",
Suffix: "_by_parameter",
cache: make(map[uint64]*aggregate),
}
derivative.Log = testutil.Logger{}
err := derivative.Init()
require.NoError(t, err)
derivative.Add(finish)
derivative.Add(start)
derivative.Push(&acc)
expectedFields := map[string]interface{}{
"increasing_by_parameter": 100.0,
"decreasing_by_parameter": -10.0,
"unchanged_by_parameter": 0.0,
}
expectedTags := map[string]string{
"state": "full",
}
acc.AssertContainsTaggedFields(t, "TestMetric", expectedFields, expectedTags)
}
func TestTwoFullEventsWithoutParameter(t *testing.T) {
acc := testutil.Accumulator{}
derivative := NewDerivative()
derivative.Log = testutil.Logger{}
err := derivative.Init()
require.NoError(t, err)
startTime := time.Now()
duration, err := time.ParseDuration("2s")
require.NoError(t, err)
endTime := startTime.Add(duration)
first := metric.New("One Field",
map[string]string{},
map[string]interface{}{
"value": int64(10),
},
startTime,
)
last := metric.New("One Field",
map[string]string{},
map[string]interface{}{
"value": int64(20),
},
endTime,
)
derivative.Add(first)
derivative.Add(last)
derivative.Push(&acc)
acc.AssertContainsFields(t,
"One Field",
map[string]interface{}{
"value_rate": float64(5),
},
)
}
func TestTwoFullEventsInSeparatePushes(t *testing.T) {
acc := testutil.Accumulator{}
derivative := &Derivative{
Variable: " parameter",
Suffix: "_wrt_parameter",
MaxRollOver: 10,
cache: make(map[uint64]*aggregate),
}
derivative.Log = testutil.Logger{}
err := derivative.Init()
require.NoError(t, err)
derivative.Add(start)
derivative.Push(&acc)
acc.AssertDoesNotContainMeasurement(t, "TestMetric")
acc.ClearMetrics()
derivative.Add(finish)
derivative.Push(&acc)
expectedFields := map[string]interface{}{
"increasing_wrt_parameter": 100.0,
"decreasing_wrt_parameter": -10.0,
"unchanged_wrt_parameter": 0.0,
}
expectedTags := map[string]string{
"state": "full",
}
acc.AssertContainsTaggedFields(t, "TestMetric", expectedFields, expectedTags)
}
func TestTwoFullEventsInSeparatePushesWithSeveralRollOvers(t *testing.T) {
acc := testutil.Accumulator{}
derivative := &Derivative{
Variable: "parameter",
Suffix: "_wrt_parameter",
MaxRollOver: 10,
cache: make(map[uint64]*aggregate),
}
derivative.Log = testutil.Logger{}
err := derivative.Init()
require.NoError(t, err)
derivative.Add(start)
derivative.Push(&acc)
acc.AssertDoesNotContainMeasurement(t, "TestMetric")
derivative.Push(&acc)
derivative.Push(&acc)
derivative.Push(&acc)
derivative.Add(finish)
derivative.Push(&acc)
expectedFields := map[string]interface{}{
"increasing_wrt_parameter": 100.0,
"decreasing_wrt_parameter": -10.0,
"unchanged_wrt_parameter": 0.0,
}
acc.AssertContainsFields(t, "TestMetric", expectedFields)
}
func TestTwoFullEventsInSeparatePushesWithOutRollOver(t *testing.T) {
acc := testutil.Accumulator{}
derivative := &Derivative{
Variable: "parameter",
Suffix: "_by_parameter",
MaxRollOver: 0,
cache: make(map[uint64]*aggregate),
}
derivative.Log = testutil.Logger{}
err := derivative.Init()
require.NoError(t, err)
derivative.Add(start)
// This test relies on RunningAggregator always calling Reset after Push
// to remove the first metric once max_roll_over of 0 has been reached.
derivative.Push(&acc)
derivative.Reset()
acc.AssertDoesNotContainMeasurement(t, "TestMetric")
acc.ClearMetrics()
derivative.Add(finish)
derivative.Push(&acc)
acc.AssertDoesNotContainMeasurement(t, "TestMetric")
}
func TestIgnoresMissingVariable(t *testing.T) {
acc := testutil.Accumulator{}
derivative := &Derivative{
Variable: "parameter",
Suffix: "_by_parameter",
cache: make(map[uint64]*aggregate),
}
derivative.Log = testutil.Logger{}
err := derivative.Init()
require.NoError(t, err)
noParameter := metric.New("TestMetric",
map[string]string{"state": "no_parameter"},
map[string]interface{}{
"increasing": int64(100),
"decreasing": int64(0),
"unchanged": int64(42),
},
time.Now(),
)
derivative.Add(noParameter)
derivative.Push(&acc)
acc.AssertDoesNotContainMeasurement(t, "TestMetric")
acc.ClearMetrics()
derivative.Add(noParameter)
derivative.Add(start)
derivative.Add(noParameter)
derivative.Add(finish)
derivative.Add(noParameter)
derivative.Push(&acc)
expectedFields := map[string]interface{}{
"increasing_by_parameter": 100.0,
"decreasing_by_parameter": -10.0,
"unchanged_by_parameter": 0.0,
}
expectedTags := map[string]string{
"state": "full",
}
acc.AssertContainsTaggedFields(t, "TestMetric", expectedFields, expectedTags)
}
func TestMergesDifferentMetricsWithSameHash(t *testing.T) {
acc := testutil.Accumulator{}
derivative := NewDerivative()
derivative.Log = testutil.Logger{}
err := derivative.Init()
require.NoError(t, err)
startTime := time.Now()
duration, err := time.ParseDuration("2s")
require.NoError(t, err)
endTime := startTime.Add(duration)
part1 := metric.New("TestMetric",
map[string]string{"state": "full"},
map[string]interface{}{"field1": int64(10)},
startTime,
)
part2 := metric.New("TestMetric",
map[string]string{"state": "full"},
map[string]interface{}{"field2": int64(20)},
startTime,
)
final := metric.New("TestMetric",
map[string]string{"state": "full"},
map[string]interface{}{
"field1": int64(30),
"field2": int64(30),
},
endTime,
)
derivative.Add(part1)
derivative.Push(&acc)
derivative.Add(part2)
derivative.Push(&acc)
derivative.Add(final)
derivative.Push(&acc)
expectedFields := map[string]interface{}{
"field1_rate": 10.0,
"field2_rate": 5.0,
}
expectedTags := map[string]string{
"state": "full",
}
acc.AssertContainsTaggedFields(t, "TestMetric", expectedFields, expectedTags)
}
func TestDropsAggregatesOnMaxRollOver(t *testing.T) {
acc := testutil.Accumulator{}
derivative := &Derivative{
MaxRollOver: 1,
cache: make(map[uint64]*aggregate),
}
derivative.Log = testutil.Logger{}
err := derivative.Init()
require.NoError(t, err)
derivative.Add(start)
derivative.Push(&acc)
derivative.Reset()
derivative.Push(&acc)
derivative.Reset()
derivative.Add(finish)
derivative.Push(&acc)
derivative.Reset()
acc.AssertDoesNotContainMeasurement(t, "TestMetric")
}
func TestAddMetricsResetsRollOver(t *testing.T) {
acc := testutil.Accumulator{}
derivative := &Derivative{
Variable: "parameter",
Suffix: "_by_parameter",
MaxRollOver: 1,
cache: make(map[uint64]*aggregate),
Log: testutil.Logger{},
}
err := derivative.Init()
require.NoError(t, err)
derivative.Add(start)
derivative.Push(&acc)
derivative.Reset()
derivative.Add(start)
derivative.Reset()
derivative.Add(finish)
derivative.Push(&acc)
expectedFields := map[string]interface{}{
"increasing_by_parameter": 100.0,
"decreasing_by_parameter": -10.0,
"unchanged_by_parameter": 0.0,
}
acc.AssertContainsFields(t, "TestMetric", expectedFields)
}
func TestCalculatesCorrectDerivativeOnTwoConsecutivePeriods(t *testing.T) {
acc := testutil.Accumulator{}
period, err := time.ParseDuration("10s")
require.NoError(t, err)
derivative := NewDerivative()
derivative.Log = testutil.Logger{}
require.NoError(t, derivative.Init())
startTime := time.Now()
first := metric.New("One Field",
map[string]string{},
map[string]interface{}{
"value": int64(10),
},
startTime,
)
derivative.Add(first)
derivative.Push(&acc)
derivative.Reset()
second := metric.New("One Field",
map[string]string{},
map[string]interface{}{
"value": int64(20),
},
startTime.Add(period),
)
derivative.Add(second)
derivative.Push(&acc)
derivative.Reset()
acc.AssertContainsFields(t, "One Field", map[string]interface{}{
"value_rate": 1.0,
})
acc.ClearMetrics()
third := metric.New("One Field",
map[string]string{},
map[string]interface{}{
"value": int64(40),
},
startTime.Add(period).Add(period),
)
derivative.Add(third)
derivative.Push(&acc)
derivative.Reset()
acc.AssertContainsFields(t, "One Field", map[string]interface{}{
"value_rate": 2.0,
})
}

View file

@@ -0,0 +1,16 @@
# Calculates a derivative for every field.
[[aggregators.derivative]]
## The period in which to flush the aggregator.
# period = "30s"
## Suffix to append for the resulting derivative field.
# suffix = "_rate"
## Field to use as the divisor (denominator) when computing the derivative.
## When using a field as the derivation parameter the name of that field will
## be used for the resulting derivative, e.g. *fieldname_by_parameter*.
## By default the timestamps of the metrics are used and the suffix is omitted.
# variable = ""
## Maximum number of roll-overs in case only one measurement is found during a period.
# max_roll_over = 10

View file

@@ -0,0 +1,92 @@
# Final Aggregator Plugin
This plugin emits the last metric of a contiguous series, defined as a
series which receives updates within the time period in `series_timeout`. The
contiguous series may be longer than the time interval defined by `period`.
When a series has not been updated within the `series_timeout`, the last metric
is emitted.
Alternatively, the plugin emits the last metric in the `period` for the
`periodic` output strategy.
This is useful for getting the final value for data sources that produce
discrete time series such as procstat, cgroup, kubernetes etc. or to downsample
metrics collected at a higher frequency.
> [!NOTE]
> All emitted metrics have fields with `_final` appended to the field name
> by default.
⭐ Telegraf v1.11.0
🏷️ sampling
💻 all
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used to
modify metrics, tags, and fields or create aliases and configure ordering, etc.
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
## Configuration
```toml @sample.conf
# Report the final metric of a series
[[aggregators.final]]
## The period on which to flush & clear the aggregator.
# period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
# drop_original = false
## If false, _final is added to every field name
# keep_original_field_names = false
## The time that a series is not updated until considering it final. Ignored
## when output_strategy is "periodic".
# series_timeout = "5m"
## Output strategy, supported values:
## timeout -- output a metric if no new input arrived for `series_timeout`
## periodic -- output the last received metric every `period`
# output_strategy = "timeout"
```
### Output strategy
By default (`output_strategy = "timeout"`) the plugin will only emit a metric
for the period if the last received one is older than the `series_timeout`.
This does not guarantee a regular output of a `final` metric, e.g. if the
`series_timeout` is a multiple of the gathering interval of an input. In this
case metrics sporadically arrive during the timeout phase of the period and
emitting the `final` metric is suppressed.
Contrary to this, `output_strategy = "periodic"` will always output a `final`
metric at the end of the period irrespective of when the last metric arrived;
the `series_timeout` is ignored.
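To make the difference concrete, a minimal sketch of the `timeout` decision
(the authoritative check is in the plugin's `Push` method later in this
commit):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	seriesTimeout := 5 * time.Minute
	lastSeen := time.Now().Add(-10 * time.Minute) // series went quiet 10 min ago

	// "timeout" strategy: only emit when the series is older than series_timeout;
	// "periodic" would emit unconditionally at the end of each period.
	if time.Since(lastSeen) > seriesTimeout {
		fmt.Println("series is final: emit the last metric with _final fields")
	}
}
```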
## Metrics
Measurement and tags are unchanged, fields are emitted with the suffix
`_final`.
## Example Output
```text
counter,host=bar i_final=3,j_final=6 1554281635115090133
counter,host=foo i_final=3,j_final=6 1554281635112992012
```
Original input:
```text
counter,host=bar i=1,j=4 1554281633101153300
counter,host=foo i=1,j=4 1554281633099323601
counter,host=bar i=2,j=5 1554281634107980073
counter,host=foo i=2,j=5 1554281634105931116
counter,host=bar i=3,j=6 1554281635115090133
counter,host=foo i=3,j=6 1554281635112992012
```

View file

@@ -0,0 +1,90 @@
//go:generate ../../../tools/readme_config_includer/generator
package final
import (
_ "embed"
"fmt"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/plugins/aggregators"
)
//go:embed sample.conf
var sampleConfig string
type Final struct {
OutputStrategy string `toml:"output_strategy"`
SeriesTimeout config.Duration `toml:"series_timeout"`
KeepOriginalFieldNames bool `toml:"keep_original_field_names"`
// The last metric for all series which are active
metricCache map[uint64]telegraf.Metric
}
func NewFinal() *Final {
return &Final{
SeriesTimeout: config.Duration(5 * time.Minute),
}
}
func (*Final) SampleConfig() string {
return sampleConfig
}
func (m *Final) Init() error {
// Check options and set defaults
switch m.OutputStrategy {
case "":
m.OutputStrategy = "timeout"
case "timeout", "periodic":
// Do nothing, those are valid
default:
return fmt.Errorf("invalid 'output_strategy': %q", m.OutputStrategy)
}
// Initialize the cache
m.metricCache = make(map[uint64]telegraf.Metric)
return nil
}
func (m *Final) Add(in telegraf.Metric) {
id := in.HashID()
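// keep only the most recent metric of each series (identified by its hash)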
m.metricCache[id] = in
}
func (m *Final) Push(acc telegraf.Accumulator) {
// Preserve timestamp of original metric
acc.SetPrecision(time.Nanosecond)
for id, metric := range m.metricCache {
if m.OutputStrategy == "timeout" && time.Since(metric.Time()) <= time.Duration(m.SeriesTimeout) {
// We output on timeout but the last metric of the series was
// younger than that. So skip the output for this period.
continue
}
var fields map[string]any
if m.KeepOriginalFieldNames {
fields = metric.Fields()
} else {
fields = make(map[string]any, len(metric.FieldList()))
for _, field := range metric.FieldList() {
fields[field.Key+"_final"] = field.Value
}
}
acc.AddFields(metric.Name(), fields, metric.Tags(), metric.Time())
delete(m.metricCache, id)
}
}
func (*Final) Reset() {
}
func init() {
aggregators.Add("final", func() telegraf.Aggregator {
return NewFinal()
})
}
@ -0,0 +1,308 @@
package final
import (
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/testutil"
)
func TestSimple(t *testing.T) {
acc := testutil.Accumulator{}
final := NewFinal()
require.NoError(t, final.Init())
tags := map[string]string{"foo": "bar"}
m1 := metric.New("m1",
tags,
map[string]interface{}{"a": int64(1)},
time.Unix(1530939936, 0))
m2 := metric.New("m1",
tags,
map[string]interface{}{"a": int64(2)},
time.Unix(1530939937, 0))
m3 := metric.New("m1",
tags,
map[string]interface{}{"a": int64(3)},
time.Unix(1530939938, 0))
final.Add(m1)
final.Add(m2)
final.Add(m3)
final.Push(&acc)
expected := []telegraf.Metric{
testutil.MustMetric(
"m1",
tags,
map[string]interface{}{
"a_final": 3,
},
time.Unix(1530939938, 0),
),
}
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics())
}
func TestTwoTags(t *testing.T) {
acc := testutil.Accumulator{}
final := NewFinal()
require.NoError(t, final.Init())
tags1 := map[string]string{"foo": "bar"}
tags2 := map[string]string{"foo": "baz"}
m1 := metric.New("m1",
tags1,
map[string]interface{}{"a": int64(1)},
time.Unix(1530939936, 0))
m2 := metric.New("m1",
tags2,
map[string]interface{}{"a": int64(2)},
time.Unix(1530939937, 0))
m3 := metric.New("m1",
tags1,
map[string]interface{}{"a": int64(3)},
time.Unix(1530939938, 0))
final.Add(m1)
final.Add(m2)
final.Add(m3)
final.Push(&acc)
expected := []telegraf.Metric{
testutil.MustMetric(
"m1",
tags2,
map[string]interface{}{
"a_final": 2,
},
time.Unix(1530939937, 0),
),
testutil.MustMetric(
"m1",
tags1,
map[string]interface{}{
"a_final": 3,
},
time.Unix(1530939938, 0),
),
}
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.SortMetrics())
}
func TestLongDifference(t *testing.T) {
acc := testutil.Accumulator{}
final := NewFinal()
final.SeriesTimeout = config.Duration(30 * time.Second)
require.NoError(t, final.Init())
tags := map[string]string{"foo": "bar"}
now := time.Now()
m1 := metric.New("m",
tags,
map[string]interface{}{"a": int64(1)},
now.Add(time.Second*-290))
m2 := metric.New("m",
tags,
map[string]interface{}{"a": int64(2)},
now.Add(time.Second*-275))
m3 := metric.New("m",
tags,
map[string]interface{}{"a": int64(3)},
now.Add(time.Second*-100))
m4 := metric.New("m",
tags,
map[string]interface{}{"a": int64(4)},
now.Add(time.Second*-20))
final.Add(m1)
final.Add(m2)
final.Push(&acc)
final.Add(m3)
final.Push(&acc)
final.Add(m4)
final.Push(&acc)
expected := []telegraf.Metric{
testutil.MustMetric(
"m",
tags,
map[string]interface{}{
"a_final": 2,
},
now.Add(time.Second*-275),
),
testutil.MustMetric(
"m",
tags,
map[string]interface{}{
"a_final": 3,
},
now.Add(time.Second*-100),
),
}
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.SortMetrics())
}
func TestOutputStrategyInvalid(t *testing.T) {
final := &Final{
OutputStrategy: "no way",
SeriesTimeout: config.Duration(30 * time.Second),
}
require.ErrorContains(t, final.Init(), `invalid 'output_strategy'`)
}
func TestOutputStrategyTimeout(t *testing.T) {
final := &Final{
OutputStrategy: "timeout",
SeriesTimeout: config.Duration(30 * time.Second),
}
require.NoError(t, final.Init())
now := time.Now()
tags := map[string]string{"foo": "bar"}
m1 := metric.New("m",
tags,
map[string]interface{}{"a": int64(1)},
now.Add(time.Second*-290))
m2 := metric.New("m",
tags,
map[string]interface{}{"a": int64(2)},
now.Add(time.Second*-275))
m3 := metric.New("m",
tags,
map[string]interface{}{"a": int64(3)},
now.Add(time.Second*-100))
m4 := metric.New("m",
tags,
map[string]interface{}{"a": int64(4)},
now.Add(time.Second*-20))
var acc testutil.Accumulator
final.Add(m1)
final.Add(m2)
final.Push(&acc)
final.Add(m3)
final.Push(&acc)
final.Add(m4)
final.Push(&acc)
expected := []telegraf.Metric{
testutil.MustMetric(
"m",
tags,
map[string]interface{}{
"a_final": 2,
},
now.Add(time.Second*-275),
),
testutil.MustMetric(
"m",
tags,
map[string]interface{}{
"a_final": 3,
},
now.Add(time.Second*-100),
),
}
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.SortMetrics())
}
func TestOutputStrategyPeriodic(t *testing.T) {
final := &Final{
OutputStrategy: "periodic",
SeriesTimeout: config.Duration(30 * time.Second),
}
require.NoError(t, final.Init())
now := time.Now()
tags := map[string]string{"foo": "bar"}
m1 := metric.New("m",
tags,
map[string]interface{}{"a": int64(1)},
now.Add(time.Second*-290))
m2 := metric.New("m",
tags,
map[string]interface{}{"a": int64(2)},
now.Add(time.Second*-275))
m3 := metric.New("m",
tags,
map[string]interface{}{"a": int64(3)},
now.Add(time.Second*-100))
m4 := metric.New("m",
tags,
map[string]interface{}{"a": int64(4)},
now.Add(time.Second*-20))
var acc testutil.Accumulator
final.Add(m1)
final.Add(m2)
final.Push(&acc)
final.Add(m3)
final.Push(&acc)
final.Add(m4)
final.Push(&acc)
expected := []telegraf.Metric{
metric.New(
"m",
tags,
map[string]interface{}{
"a_final": 2,
},
now.Add(time.Second*-275),
),
metric.New(
"m",
tags,
map[string]interface{}{
"a_final": 3,
},
now.Add(time.Second*-100),
),
metric.New(
"m",
tags,
map[string]interface{}{
"a_final": 4,
},
now.Add(time.Second*-20),
),
}
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.SortMetrics())
}
func TestKeepOriginalFieldNames(t *testing.T) {
final := &Final{
OutputStrategy: "periodic",
SeriesTimeout: config.Duration(30 * time.Second),
KeepOriginalFieldNames: true,
}
require.NoError(t, final.Init())
now := time.Now()
tags := map[string]string{"foo": "bar"}
m1 := metric.New("m",
tags,
map[string]any{"a": 3},
now.Add(time.Second*-90))
var acc testutil.Accumulator
final.Add(m1)
final.Push(&acc)
expected := []telegraf.Metric{
metric.New(
"m",
tags,
map[string]any{"a": 3},
now.Add(time.Second*-90),
),
}
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.SortMetrics())
}
@ -0,0 +1,20 @@
# Report the final metric of a series
[[aggregators.final]]
## The period on which to flush & clear the aggregator.
# period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
# drop_original = false
## If false, _final is added to every field name
# keep_original_field_names = false
## The time a series must go without updates before it is considered final.
## Ignored when output_strategy is "periodic".
# series_timeout = "5m"
## Output strategy, supported values:
## timeout -- output a metric if no new input arrived for `series_timeout`
## periodic -- output the last received metric every `period`
# output_strategy = "timeout"
@ -0,0 +1,135 @@
# Histogram Aggregator Plugin
This plugin creates histograms containing the counts of field values within the
configured range. The histogram metric is emitted every `period`.
In `cumulative` mode, values added to a bucket are also added to the
consecutive buckets in the distribution creating a [cumulative histogram][1].
> [!NOTE]
> By default bucket counts are not reset between periods and will be
> non-strictly increasing while Telegraf is running. This behavior can be
> changed by setting the `reset` parameter.
⭐ Telegraf v1.4.0
🏷️ statistics
💻 all
[1]: https://en.wikipedia.org/wiki/Histogram#/media/File:Cumulative_vs_normal_histogram.svg
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used to
modify metrics, tags, and fields or create aliases and configure ordering, etc.
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
## Configuration
```toml @sample.conf
# Configuration for aggregate histogram metrics
[[aggregators.histogram]]
## The period in which to flush the aggregator.
# period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
# drop_original = false
## If true, the histogram will be reset on flush instead
## of accumulating the results.
reset = false
## Whether bucket values should be accumulated. If set to false, the "gt" tag will be added.
## Defaults to true.
cumulative = true
## Expiration interval for each histogram. The histogram will be expired if
## there are no changes in any buckets for this time interval. 0 == no expiration.
# expiration_interval = "0m"
## If true, the aggregated histogram is pushed to the output only if it was
## updated since the previous push. Defaults to false.
# push_only_on_update = false
## Example config that aggregates all fields of the metric.
# [[aggregators.histogram.config]]
# ## Right borders of buckets (with +Inf implicitly added).
# buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
# ## The name of metric.
# measurement_name = "cpu"
## Example config that aggregates only specific fields of the metric.
# [[aggregators.histogram.config]]
# ## Right borders of buckets (with +Inf implicitly added).
# buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
# ## The name of the metric.
# measurement_name = "diskio"
# ## The concrete fields of the metric
# fields = ["io_time", "read_time", "write_time"]
```
The user is responsible for defining the bounds of the histogram buckets as
well as the measurement name and the fields to aggregate.
Each histogram config section must contain a `buckets` and a `measurement_name`
option. Optionally, if `fields` is set, only the listed fields will be
aggregated; if it is not set, all fields are aggregated.
The `buckets` option contains a list of floats which specify the bucket
boundaries. Each float value defines the inclusive upper (right) bound of the
bucket. The `+Inf` bucket is added automatically and does not need to be
defined. (For the left boundaries, the same specified borders and `-Inf` are
used.)
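To illustrate how a value is assigned to a bucket, here is a minimal Go sketch
mirroring the plugin's internal use of `sort.SearchFloat64s` (the `bucketIndex`
helper and the values are illustrative, not part of the plugin API):
```go
package main

import (
	"fmt"
	"sort"
)

// bucketIndex returns the index of the bucket a value falls into.
// An index equal to len(buckets) denotes the implicit +Inf bucket.
func bucketIndex(buckets []float64, value float64) int {
	// SearchFloat64s returns the smallest index i with buckets[i] >= value,
	// which matches the inclusive right ("le") border semantics.
	return sort.SearchFloat64s(buckets, value)
}

func main() {
	buckets := []float64{0.0, 10.0, 50.0, 100.0}
	for _, v := range []float64{7, 12, 50, 99, 120} {
		fmt.Printf("value %v -> bucket index %d\n", v, bucketIndex(buckets, v))
	}
}
```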
## Measurements & Fields
The suffix `_bucket` will be added to each field key.
- measurement1
- field1_bucket
- field2_bucket
### Tags
- `cumulative = true` (default):
- `le`: Right bucket border. It means that the metric value is less than or
equal to the value of this tag. If a metric value is sorted into a bucket,
it is also sorted into all larger buckets. As a result, the value of
`<field>_bucket` is rising with rising `le` value. When `le` is `+Inf`,
the bucket value is the count of all metrics, because all metric values are
less than or equal to positive infinity.
- `cumulative = false`:
- `gt`: Left bucket border. It means that the metric value is greater than
(and not equal to) the value of this tag.
- `le`: Right bucket border. It means that the metric value is less than or
equal to the value of this tag.
- As both `gt` and `le` are present, each metric is sorted in only exactly
one bucket.
## Example Output
Let's assume we have the buckets [0, 10, 50, 100] and the following field
values for `usage_idle`: [50, 7, 99, 12]
With `cumulative = true`:
```text
cpu,cpu=cpu1,host=localhost,le=0.0 usage_idle_bucket=0i 1486998330000000000 # none
cpu,cpu=cpu1,host=localhost,le=10.0 usage_idle_bucket=1i 1486998330000000000 # 7
cpu,cpu=cpu1,host=localhost,le=50.0 usage_idle_bucket=2i 1486998330000000000 # 7, 12
cpu,cpu=cpu1,host=localhost,le=100.0 usage_idle_bucket=4i 1486998330000000000 # 7, 12, 50, 99
cpu,cpu=cpu1,host=localhost,le=+Inf usage_idle_bucket=4i 1486998330000000000 # 7, 12, 50, 99
```
With `cumulative = false`:
```text
cpu,cpu=cpu1,host=localhost,gt=-Inf,le=0.0 usage_idle_bucket=0i 1486998330000000000 # none
cpu,cpu=cpu1,host=localhost,gt=0.0,le=10.0 usage_idle_bucket=1i 1486998330000000000 # 7
cpu,cpu=cpu1,host=localhost,gt=10.0,le=50.0 usage_idle_bucket=1i 1486998330000000000 # 12
cpu,cpu=cpu1,host=localhost,gt=50.0,le=100.0 usage_idle_bucket=2i 1486998330000000000 # 50, 99
cpu,cpu=cpu1,host=localhost,gt=100.0,le=+Inf usage_idle_bucket=0i 1486998330000000000 # none
```
@ -0,0 +1,327 @@
//go:generate ../../../tools/readme_config_includer/generator
package histogram
import (
_ "embed"
"sort"
"strconv"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/plugins/aggregators"
)
//go:embed sample.conf
var sampleConfig string
// bucketRightTag is the tag containing the right bucket border
const bucketRightTag = "le"
// bucketPosInf is the right bucket border for infinite values
const bucketPosInf = "+Inf"
// bucketLeftTag is the tag containing the left bucket border (exclusive)
const bucketLeftTag = "gt"
// bucketNegInf is the left bucket border for infinite values
const bucketNegInf = "-Inf"
// HistogramAggregator is an aggregator with histogram configs and the particular histograms for the defined metrics
type HistogramAggregator struct {
Configs []bucketConfig `toml:"config"`
ResetBuckets bool `toml:"reset"`
Cumulative bool `toml:"cumulative"`
ExpirationInterval config.Duration `toml:"expiration_interval"`
PushOnlyOnUpdate bool `toml:"push_only_on_update"`
buckets bucketsByMetrics
cache map[uint64]metricHistogramCollection
}
// bucketConfig is the config, which contains name, field of metric and histogram buckets.
type bucketConfig struct {
Metric string `toml:"measurement_name"`
Fields []string `toml:"fields"`
Buckets buckets `toml:"buckets"`
}
// bucketsByMetrics contains the buckets grouped by metric and field name
type bucketsByMetrics map[string]bucketsByFields
// bucketsByFields contains the buckets grouped by field name
type bucketsByFields map[string]buckets
// buckets contains the right borders buckets
type buckets []float64
// metricHistogramCollection aggregates the histogram data
type metricHistogramCollection struct {
histogramCollection map[string]counts
name string
tags map[string]string
expireTime time.Time
updated bool
}
// counts is the number of hits in the bucket
type counts []int64
// groupedByCountFields contains grouped fields by their count and fields values
type groupedByCountFields struct {
name string
tags map[string]string
fieldsWithCount map[string]int64
}
var timeNow = time.Now
// NewHistogramAggregator creates new histogram aggregator
func NewHistogramAggregator() *HistogramAggregator {
h := &HistogramAggregator{
Cumulative: true,
}
h.buckets = make(bucketsByMetrics)
h.resetCache()
return h
}
func (*HistogramAggregator) SampleConfig() string {
return sampleConfig
}
// Add adds new hit to the buckets
func (h *HistogramAggregator) Add(in telegraf.Metric) {
addTime := timeNow()
bucketsByField := make(map[string][]float64)
for field := range in.Fields() {
buckets := h.getBuckets(in.Name(), field)
if buckets != nil {
bucketsByField[field] = buckets
}
}
if len(bucketsByField) == 0 {
return
}
id := in.HashID()
agr, ok := h.cache[id]
if !ok {
agr = metricHistogramCollection{
name: in.Name(),
tags: in.Tags(),
histogramCollection: make(map[string]counts),
}
}
for field, value := range in.Fields() {
if buckets, ok := bucketsByField[field]; ok {
if agr.histogramCollection[field] == nil {
agr.histogramCollection[field] = make(counts, len(buckets)+1)
}
if value, ok := convert(value); ok {
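// SearchFloat64s returns the smallest index i with buckets[i] >= value,
// i.e. the bucket with the matching inclusive right border; an index of
// len(buckets) selects the implicit +Inf bucket.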
index := sort.SearchFloat64s(buckets, value)
agr.histogramCollection[field][index]++
}
if h.ExpirationInterval != 0 {
agr.expireTime = addTime.Add(time.Duration(h.ExpirationInterval))
}
agr.updated = true
}
}
h.cache[id] = agr
}
// Push returns histogram values for metrics
func (h *HistogramAggregator) Push(acc telegraf.Accumulator) {
now := timeNow()
metricsWithGroupedFields := make([]groupedByCountFields, 0)
for id, aggregate := range h.cache {
if h.ExpirationInterval != 0 && now.After(aggregate.expireTime) {
delete(h.cache, id)
continue
}
if h.PushOnlyOnUpdate && !h.cache[id].updated {
continue
}
aggregate.updated = false
h.cache[id] = aggregate
for field, counts := range aggregate.histogramCollection {
h.groupFieldsByBuckets(&metricsWithGroupedFields, aggregate.name, field, copyTags(aggregate.tags), counts)
}
}
for _, metric := range metricsWithGroupedFields {
acc.AddFields(metric.name, makeFieldsWithCount(metric.fieldsWithCount), metric.tags)
}
}
// groupFieldsByBuckets groups fields by metric buckets which are represented as tags
func (h *HistogramAggregator) groupFieldsByBuckets(
metricsWithGroupedFields *[]groupedByCountFields, name, field string, tags map[string]string, counts []int64,
) {
sum := int64(0)
buckets := h.getBuckets(name, field) // note that len(buckets) + 1 == len(counts)
for index, count := range counts {
if !h.Cumulative {
sum = 0 // reset sum -> don't store cumulative counts
tags[bucketLeftTag] = bucketNegInf
if index > 0 {
tags[bucketLeftTag] = strconv.FormatFloat(buckets[index-1], 'f', -1, 64)
}
}
tags[bucketRightTag] = bucketPosInf
if index < len(buckets) {
tags[bucketRightTag] = strconv.FormatFloat(buckets[index], 'f', -1, 64)
}
sum += count
groupField(metricsWithGroupedFields, name, field, sum, copyTags(tags))
}
}
// groupField groups field by count value
func groupField(metricsWithGroupedFields *[]groupedByCountFields, name, field string, count int64, tags map[string]string) {
for key, metric := range *metricsWithGroupedFields {
if name == metric.name && isTagsIdentical(tags, metric.tags) {
(*metricsWithGroupedFields)[key].fieldsWithCount[field] = count
return
}
}
fieldsWithCount := map[string]int64{
field: count,
}
*metricsWithGroupedFields = append(
*metricsWithGroupedFields,
groupedByCountFields{name: name, tags: tags, fieldsWithCount: fieldsWithCount},
)
}
// Reset does nothing by default since counts are typically collected over a
// long time. If the 'reset' config parameter is set to true, the cache and
// buckets are cleared on every flush, yielding a histogram covering only the
// last period, which is useful in some use cases.
func (h *HistogramAggregator) Reset() {
if h.ResetBuckets {
h.resetCache()
h.buckets = make(bucketsByMetrics)
}
}
// resetCache resets cached counts(hits) in the buckets
func (h *HistogramAggregator) resetCache() {
h.cache = make(map[uint64]metricHistogramCollection)
}
// getBuckets finds buckets and returns them
func (h *HistogramAggregator) getBuckets(metric, field string) []float64 {
if buckets, ok := h.buckets[metric][field]; ok {
return buckets
}
for _, cfg := range h.Configs {
if cfg.Metric == metric {
if !isBucketExists(field, cfg) {
continue
}
if _, ok := h.buckets[metric]; !ok {
h.buckets[metric] = make(bucketsByFields)
}
h.buckets[metric][field] = sortBuckets(cfg.Buckets)
}
}
return h.buckets[metric][field]
}
// isBucketExists checks whether a bucket configuration exists for the passed field
func isBucketExists(field string, cfg bucketConfig) bool {
if len(cfg.Fields) == 0 {
return true
}
for _, fl := range cfg.Fields {
if fl == field {
return true
}
}
return false
}
// sortBuckets sorts the buckets if they are not already in increasing order
func sortBuckets(buckets []float64) []float64 {
for i, bucket := range buckets {
if i < len(buckets)-1 && bucket >= buckets[i+1] {
sort.Float64s(buckets)
break
}
}
return buckets
}
// convert converts a field value to float64 if possible
func convert(in interface{}) (float64, bool) {
switch v := in.(type) {
case float64:
return v, true
case int64:
return float64(v), true
default:
return 0, false
}
}
// copyTags copies tags
func copyTags(tags map[string]string) map[string]string {
copiedTags := make(map[string]string, len(tags))
for key, val := range tags {
copiedTags[key] = val
}
return copiedTags
}
// isTagsIdentical checks the identity of two list of tags
func isTagsIdentical(originalTags, checkedTags map[string]string) bool {
if len(originalTags) != len(checkedTags) {
return false
}
for tagName, tagValue := range originalTags {
if tagValue != checkedTags[tagName] {
return false
}
}
return true
}
// makeFieldsWithCount assigns count value to all metric fields
func makeFieldsWithCount(fieldsWithCountIn map[string]int64) map[string]interface{} {
fieldsWithCountOut := make(map[string]interface{}, len(fieldsWithCountIn))
for field, count := range fieldsWithCountIn {
fieldsWithCountOut[field+"_bucket"] = count
}
return fieldsWithCountOut
}
// init initializes histogram aggregator plugin
func init() {
aggregators.Add("histogram", func() telegraf.Aggregator {
return NewHistogramAggregator()
})
}
@ -0,0 +1,529 @@
package histogram
import (
"fmt"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/testutil"
)
type fields map[string]interface{}
type tags map[string]string
// NewTestHistogram creates new test histogram aggregation with specified config
func NewTestHistogram(cfg []bucketConfig, reset, cumulative, pushOnlyOnUpdate bool) telegraf.Aggregator {
return NewTestHistogramWithExpirationInterval(cfg, reset, cumulative, pushOnlyOnUpdate, 0)
}
func NewTestHistogramWithExpirationInterval(
cfg []bucketConfig, reset, cumulative, pushOnlyOnUpdate bool, expirationInterval config.Duration,
) telegraf.Aggregator {
htm := NewHistogramAggregator()
htm.Configs = cfg
htm.ResetBuckets = reset
htm.Cumulative = cumulative
htm.ExpirationInterval = expirationInterval
htm.PushOnlyOnUpdate = pushOnlyOnUpdate
return htm
}
// firstMetric1 is the first test metric
var firstMetric1 = metric.New(
"first_metric_name",
tags{},
fields{
"a": float64(15.3),
"b": float64(40),
},
time.Now(),
)
// firstMetric2 is the first test metric with different values
var firstMetric2 = metric.New(
"first_metric_name",
tags{},
fields{
"a": float64(15.9),
"c": float64(40),
},
time.Now(),
)
// secondMetric is the second metric
var secondMetric = metric.New(
"second_metric_name",
tags{},
fields{
"a": float64(105),
"ignoreme": "string",
"andme": true,
},
time.Now(),
)
// BenchmarkApply runs benchmarks
func BenchmarkApply(b *testing.B) {
histogram := NewHistogramAggregator()
for n := 0; n < b.N; n++ {
histogram.Add(firstMetric1)
histogram.Add(firstMetric2)
histogram.Add(secondMetric)
}
}
// TestHistogram tests metrics for one period and for one field
func TestHistogram(t *testing.T) {
var cfg []bucketConfig
cfg = append(cfg, bucketConfig{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}})
histogram := NewTestHistogram(cfg, false, true, false)
acc := &testutil.Accumulator{}
histogram.Add(firstMetric1)
histogram.Reset()
histogram.Add(firstMetric2)
histogram.Push(acc)
require.Len(t, acc.Metrics, 6, "Incorrect number of metrics")
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketRightTag: "0"})
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketRightTag: "10"})
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2)}, tags{bucketRightTag: "20"})
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2)}, tags{bucketRightTag: "30"})
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2)}, tags{bucketRightTag: "40"})
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2)}, tags{bucketRightTag: bucketPosInf})
}
// TestHistogramPushOnUpdate tests metrics for one period, for one field, pushing only on histogram updates
func TestHistogramPushOnUpdate(t *testing.T) {
var cfg []bucketConfig
cfg = append(cfg, bucketConfig{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}})
histogram := NewTestHistogram(cfg, false, true, true)
acc := &testutil.Accumulator{}
histogram.Add(firstMetric1)
histogram.Reset()
histogram.Add(firstMetric2)
histogram.Push(acc)
require.Len(t, acc.Metrics, 6, "Incorrect number of metrics")
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketRightTag: "0"})
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketRightTag: "10"})
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2)}, tags{bucketRightTag: "20"})
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2)}, tags{bucketRightTag: "30"})
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2)}, tags{bucketRightTag: "40"})
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2)}, tags{bucketRightTag: bucketPosInf})
acc.ClearMetrics()
histogram.Push(acc)
require.Empty(t, acc.Metrics, "Incorrect number of metrics")
histogram.Add(firstMetric2)
histogram.Push(acc)
require.Len(t, acc.Metrics, 6, "Incorrect number of metrics")
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketRightTag: "0"})
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketRightTag: "10"})
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(3)}, tags{bucketRightTag: "20"})
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(3)}, tags{bucketRightTag: "30"})
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(3)}, tags{bucketRightTag: "40"})
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(3)}, tags{bucketRightTag: bucketPosInf})
}
// TestHistogramNonCumulative tests metrics for one period and for one field
func TestHistogramNonCumulative(t *testing.T) {
var cfg []bucketConfig
cfg = append(cfg, bucketConfig{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}})
histogram := NewTestHistogram(cfg, false, false, false)
acc := &testutil.Accumulator{}
histogram.Add(firstMetric1)
histogram.Reset()
histogram.Add(firstMetric2)
histogram.Push(acc)
require.Len(t, acc.Metrics, 6, "Incorrect number of metrics")
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketLeftTag: bucketNegInf, bucketRightTag: "0"})
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketLeftTag: "0", bucketRightTag: "10"})
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2)}, tags{bucketLeftTag: "10", bucketRightTag: "20"})
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketLeftTag: "20", bucketRightTag: "30"})
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketLeftTag: "30", bucketRightTag: "40"})
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketLeftTag: "40", bucketRightTag: bucketPosInf})
}
// TestHistogramWithReset tests metrics for one period and for one field, with reset between metrics adding
func TestHistogramWithReset(t *testing.T) {
var cfg []bucketConfig
cfg = append(cfg, bucketConfig{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}})
histogram := NewTestHistogram(cfg, true, true, false)
acc := &testutil.Accumulator{}
histogram.Add(firstMetric1)
histogram.Reset()
histogram.Add(firstMetric2)
histogram.Push(acc)
require.Len(t, acc.Metrics, 6, "Incorrect number of metrics")
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketRightTag: "0"})
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketRightTag: "10"})
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1)}, tags{bucketRightTag: "20"})
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1)}, tags{bucketRightTag: "30"})
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1)}, tags{bucketRightTag: "40"})
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1)}, tags{bucketRightTag: bucketPosInf})
}
// TestHistogramWithAllFields tests two metrics for one period and for all fields
func TestHistogramWithAllFields(t *testing.T) {
cfg := []bucketConfig{
{Metric: "first_metric_name", Buckets: []float64{0.0, 15.5, 20.0, 30.0, 40.0}},
{Metric: "second_metric_name", Buckets: []float64{0.0, 4.0, 10.0, 23.0, 30.0}},
}
histogram := NewTestHistogram(cfg, false, true, false)
acc := &testutil.Accumulator{}
histogram.Add(firstMetric1)
histogram.Add(firstMetric2)
histogram.Add(secondMetric)
histogram.Push(acc)
require.Len(t, acc.Metrics, 12, "Incorrect number of metrics")
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketRightTag: "0"})
assertContainsTaggedField(
t,
acc,
"first_metric_name",
fields{"a_bucket": int64(1), "b_bucket": int64(0), "c_bucket": int64(0)},
tags{bucketRightTag: "15.5"},
)
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketRightTag: "20"})
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketRightTag: "30"})
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, tags{bucketRightTag: "40"})
assertContainsTaggedField(
t,
acc,
"first_metric_name",
fields{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)},
tags{bucketRightTag: bucketPosInf},
)
assertContainsTaggedField(
t,
acc,
"second_metric_name",
fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)},
tags{bucketRightTag: "0"},
)
assertContainsTaggedField(
t,
acc,
"second_metric_name",
fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)},
tags{bucketRightTag: "4"},
)
assertContainsTaggedField(
t,
acc,
"second_metric_name",
fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)},
tags{bucketRightTag: "10"},
)
assertContainsTaggedField(
t,
acc,
"second_metric_name",
fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)},
tags{bucketRightTag: "23"},
)
assertContainsTaggedField(
t,
acc,
"second_metric_name",
fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)},
tags{bucketRightTag: "30"},
)
assertContainsTaggedField(
t,
acc,
"second_metric_name",
fields{"a_bucket": int64(1), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)},
tags{bucketRightTag: bucketPosInf},
)
}
// TestHistogramWithAllFieldsNonCumulative tests two metrics for one period and for all fields
func TestHistogramWithAllFieldsNonCumulative(t *testing.T) {
cfg := []bucketConfig{
{Metric: "first_metric_name", Buckets: []float64{0.0, 15.5, 20.0, 30.0, 40.0}},
{Metric: "second_metric_name", Buckets: []float64{0.0, 4.0, 10.0, 23.0, 30.0}},
}
histogram := NewTestHistogram(cfg, false, false, false)
acc := &testutil.Accumulator{}
histogram.Add(firstMetric1)
histogram.Add(firstMetric2)
histogram.Add(secondMetric)
histogram.Push(acc)
require.Len(t, acc.Metrics, 12, "Incorrect number of metrics")
assertContainsTaggedField(
t,
acc,
"first_metric_name",
fields{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)},
tags{bucketLeftTag: bucketNegInf, bucketRightTag: "0"},
)
assertContainsTaggedField(
t,
acc,
"first_metric_name",
fields{"a_bucket": int64(1), "b_bucket": int64(0), "c_bucket": int64(0)},
tags{bucketLeftTag: "0", bucketRightTag: "15.5"},
)
assertContainsTaggedField(
t,
acc,
"first_metric_name",
fields{"a_bucket": int64(1), "b_bucket": int64(0), "c_bucket": int64(0)},
tags{bucketLeftTag: "15.5", bucketRightTag: "20"},
)
assertContainsTaggedField(
t,
acc,
"first_metric_name",
fields{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)},
tags{bucketLeftTag: "20", bucketRightTag: "30"},
)
assertContainsTaggedField(
t,
acc,
"first_metric_name",
fields{"a_bucket": int64(0), "b_bucket": int64(1), "c_bucket": int64(1)},
tags{bucketLeftTag: "30", bucketRightTag: "40"},
)
assertContainsTaggedField(
t,
acc,
"first_metric_name",
fields{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)},
tags{bucketLeftTag: "40", bucketRightTag: bucketPosInf},
)
assertContainsTaggedField(
t,
acc,
"second_metric_name",
fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)},
tags{bucketLeftTag: bucketNegInf, bucketRightTag: "0"},
)
assertContainsTaggedField(
t,
acc,
"second_metric_name",
fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)},
tags{bucketLeftTag: "0", bucketRightTag: "4"},
)
assertContainsTaggedField(
t,
acc,
"second_metric_name",
fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)},
tags{bucketLeftTag: "4", bucketRightTag: "10"},
)
assertContainsTaggedField(
t,
acc,
"second_metric_name",
fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)},
tags{bucketLeftTag: "10", bucketRightTag: "23"},
)
assertContainsTaggedField(
t,
acc,
"second_metric_name",
fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)},
tags{bucketLeftTag: "23", bucketRightTag: "30"},
)
assertContainsTaggedField(
t,
acc,
"second_metric_name",
fields{"a_bucket": int64(1), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)},
tags{bucketLeftTag: "30", bucketRightTag: bucketPosInf},
)
}
// TestHistogramWithTwoPeriodsAndAllFields tests two metrics getting added with a push/reset in between (simulates
// getting added in different periods) for all fields
func TestHistogramWithTwoPeriodsAndAllFields(t *testing.T) {
var cfg []bucketConfig
cfg = append(cfg, bucketConfig{Metric: "first_metric_name", Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}})
histogram := NewTestHistogram(cfg, false, true, false)
acc := &testutil.Accumulator{}
histogram.Add(firstMetric1)
histogram.Push(acc)
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0), "b_bucket": int64(0)}, tags{bucketRightTag: "0"})
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0), "b_bucket": int64(0)}, tags{bucketRightTag: "10"})
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1), "b_bucket": int64(0)}, tags{bucketRightTag: "20"})
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1), "b_bucket": int64(0)}, tags{bucketRightTag: "30"})
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1), "b_bucket": int64(1)}, tags{bucketRightTag: "40"})
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1), "b_bucket": int64(1)}, tags{bucketRightTag: bucketPosInf})
acc.ClearMetrics()
histogram.Add(firstMetric2)
histogram.Push(acc)
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketRightTag: "0"})
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketRightTag: "10"})
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketRightTag: "20"})
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketRightTag: "30"})
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, tags{bucketRightTag: "40"})
assertContainsTaggedField(
t,
acc,
"first_metric_name",
fields{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)},
tags{bucketRightTag: bucketPosInf},
)
}
// TestWrongBucketsOrder tests the panic raised on an incorrect order of buckets
func TestWrongBucketsOrder(t *testing.T) {
defer func() {
if r := recover(); r != nil {
require.Equal(
t,
"histogram buckets must be in increasing order: 90.00 >= 20.00, metrics: first_metric_name, field: a",
fmt.Sprint(r),
)
}
}()
var cfg []bucketConfig
cfg = append(cfg, bucketConfig{Metric: "first_metric_name", Buckets: []float64{0.0, 90.0, 20.0, 30.0, 40.0}})
histogram := NewTestHistogram(cfg, false, true, false)
histogram.Add(firstMetric2)
}
// TestHistogramMetricExpiration tests two metrics getting added and metric expiration
func TestHistogramMetricExpiration(t *testing.T) {
currentTime := time.Unix(10, 0)
timeNow = func() time.Time {
return currentTime
}
defer func() {
timeNow = time.Now
}()
cfg := []bucketConfig{
{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}},
{Metric: "second_metric_name", Buckets: []float64{0.0, 4.0, 10.0, 23.0, 30.0}},
}
histogram := NewTestHistogramWithExpirationInterval(cfg, false, true, false, config.Duration(30))
acc := &testutil.Accumulator{}
histogram.Add(firstMetric1)
currentTime = time.Unix(41, 0)
histogram.Add(secondMetric)
histogram.Push(acc)
require.Len(t, acc.Metrics, 6, "Incorrect number of metrics")
assertContainsTaggedField(
t,
acc,
"second_metric_name",
fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)},
tags{bucketRightTag: "0"},
)
assertContainsTaggedField(
t,
acc,
"second_metric_name",
fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)},
tags{bucketRightTag: "4"},
)
assertContainsTaggedField(
t,
acc,
"second_metric_name",
fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)},
tags{bucketRightTag: "10"},
)
assertContainsTaggedField(
t,
acc,
"second_metric_name",
fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)},
tags{bucketRightTag: "23"},
)
assertContainsTaggedField(
t,
acc,
"second_metric_name",
fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)},
tags{bucketRightTag: "30"},
)
assertContainsTaggedField(
t,
acc,
"second_metric_name",
fields{"a_bucket": int64(1), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)},
tags{bucketRightTag: bucketPosInf},
)
}
// assertContainsTaggedField is a helper function to check histogram data
func assertContainsTaggedField(t *testing.T, acc *testutil.Accumulator, metricName string, fields map[string]interface{}, tags map[string]string) {
acc.Lock()
defer acc.Unlock()
for _, checkedMetric := range acc.Metrics {
// filter by metric name
if checkedMetric.Measurement != metricName {
continue
}
// filter by tags
isTagsIdentical := true
for tag := range tags {
if val, ok := checkedMetric.Tags[tag]; !ok || val != tags[tag] {
isTagsIdentical = false
break
}
}
if !isTagsIdentical {
continue
}
// filter by field keys
isFieldKeysIdentical := true
for field := range fields {
if _, ok := checkedMetric.Fields[field]; !ok {
isFieldKeysIdentical = false
break
}
}
if !isFieldKeysIdentical {
continue
}
// check fields with their counts
require.Equal(t, fields, checkedMetric.Fields)
return
}
require.Failf(t, "Unknown measurement", "Unknown measurement %q with tags: %v, fields: %v", metricName, tags, fields)
}

View file

@ -0,0 +1,40 @@
# Configuration for aggregate histogram metrics
[[aggregators.histogram]]
## The period in which to flush the aggregator.
# period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
# drop_original = false
## If true, the histogram will be reset on flush instead
## of accumulating the results.
reset = false
## Whether bucket values should be accumulated. If set to false, the "gt" tag will be added.
## Defaults to true.
cumulative = true
## Expiration interval for each histogram. The histogram will be expired if
## there are no changes in any buckets for this time interval. 0 == no expiration.
# expiration_interval = "0m"
## If true, the aggregated histogram is pushed to the output only if it was
## updated since the previous push. Defaults to false.
# push_only_on_update = false
## Example config that aggregates all fields of the metric.
# [[aggregators.histogram.config]]
# ## Right borders of buckets (with +Inf implicitly added).
# buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
# ## The name of the metric.
# measurement_name = "cpu"
## Example config that aggregates only specific fields of the metric.
# [[aggregators.histogram.config]]
# ## Right borders of buckets (with +Inf implicitly added).
# buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
# ## The name of the metric.
# measurement_name = "diskio"
# ## The concrete fields of the metric
# fields = ["io_time", "read_time", "write_time"]
@ -0,0 +1,49 @@
# Merge Aggregator Plugin
This plugin merges metrics of the same series and timestamp into new metrics
with the super-set of fields. A series here is defined by the metric name and
the tag key-value set.
Use this plugin when fields are split over multiple metrics, with the same
measurement, tag set and timestamp.
⭐ Telegraf v1.13.0
🏷️ transformation
💻 all
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used to
modify metrics, tags, and fields or create aliases and configure ordering, etc.
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
## Configuration
```toml @sample.conf
# Merge metrics into multifield metrics by series key
[[aggregators.merge]]
## General Aggregator Arguments:
## The period on which to flush & clear the aggregator.
# period = "30s"
## Precision to round the metric timestamp to
## This is useful for cases where metrics to merge arrive within a small
## interval and thus vary in timestamp. The timestamp of the resulting metric
## is also rounded.
# round_timestamp_to = "1ns"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = true
```
## Example
```diff
- cpu,host=localhost usage_time=42 1567562620000000000
- cpu,host=localhost idle_time=42 1567562620000000000
+ cpu,host=localhost idle_time=42,usage_time=42 1567562620000000000
```
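For instance, with `round_timestamp_to = "10ns"` two metrics arriving at
`1567562620000000001` and `1567562620000000002` would both be rounded to
`1567562620000000000` and therefore be merged into a single metric.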
@ -0,0 +1,63 @@
//go:generate ../../../tools/readme_config_includer/generator
package merge
import (
_ "embed"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/plugins/aggregators"
)
//go:embed sample.conf
var sampleConfig string
type Merge struct {
RoundTimestamp config.Duration `toml:"round_timestamp_to"`
grouper *metric.SeriesGrouper
}
func (*Merge) SampleConfig() string {
return sampleConfig
}
func (a *Merge) Init() error {
a.grouper = metric.NewSeriesGrouper()
return nil
}
func (a *Merge) Add(m telegraf.Metric) {
gm := m
if a.RoundTimestamp > 0 {
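// Rounding modifies the metric, so work on a copy to leave the original
// untouched; tracking metrics are unwrapped before copying.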
if unwrapped, ok := m.(telegraf.UnwrappableMetric); ok {
gm = unwrapped.Unwrap().Copy()
} else {
gm = m.Copy()
}
ts := gm.Time()
gm.SetTime(ts.Round(time.Duration(a.RoundTimestamp)))
}
a.grouper.AddMetric(gm)
}
func (a *Merge) Push(acc telegraf.Accumulator) {
// Always use nanosecond precision to avoid rounding metrics that were
// produced at a precision higher than the agent default.
acc.SetPrecision(time.Nanosecond)
for _, m := range a.grouper.Metrics() {
acc.AddMetric(m)
}
}
func (a *Merge) Reset() {
a.grouper = metric.NewSeriesGrouper()
}
func init() {
aggregators.Add("merge", func() telegraf.Aggregator {
return &Merge{}
})
}
@ -0,0 +1,374 @@
package merge
import (
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/testutil"
)
func TestSimple(t *testing.T) {
plugin := &Merge{}
require.NoError(t, plugin.Init())
plugin.Add(
testutil.MustMetric(
"cpu",
map[string]string{
"cpu": "cpu0",
},
map[string]interface{}{
"time_idle": 42,
},
time.Unix(0, 0),
),
)
plugin.Add(
testutil.MustMetric(
"cpu",
map[string]string{
"cpu": "cpu0",
},
map[string]interface{}{
"time_guest": 42,
},
time.Unix(0, 0),
),
)
var acc testutil.Accumulator
plugin.Push(&acc)
expected := []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{
"cpu": "cpu0",
},
map[string]interface{}{
"time_idle": 42,
"time_guest": 42,
},
time.Unix(0, 0),
),
}
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics())
}
func TestNanosecondPrecision(t *testing.T) {
plugin := &Merge{}
require.NoError(t, plugin.Init())
plugin.Add(
testutil.MustMetric(
"cpu",
map[string]string{
"cpu": "cpu0",
},
map[string]interface{}{
"time_idle": 42,
},
time.Unix(0, 1),
),
)
plugin.Add(
testutil.MustMetric(
"cpu",
map[string]string{
"cpu": "cpu0",
},
map[string]interface{}{
"time_guest": 42,
},
time.Unix(0, 1),
),
)
var acc testutil.Accumulator
acc.SetPrecision(time.Second)
plugin.Push(&acc)
expected := []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{
"cpu": "cpu0",
},
map[string]interface{}{
"time_idle": 42,
"time_guest": 42,
},
time.Unix(0, 1),
),
}
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics())
}
func TestNoRounding(t *testing.T) {
plugin := &Merge{}
require.NoError(t, plugin.Init())
plugin.Add(
testutil.MustMetric(
"cpu",
map[string]string{
"cpu": "cpu0",
},
map[string]interface{}{
"time_idle": 23,
},
time.Unix(0, 1),
),
)
plugin.Add(
testutil.MustMetric(
"cpu",
map[string]string{
"cpu": "cpu0",
},
map[string]interface{}{
"time_guest": 42,
},
time.Unix(0, 2),
),
)
var acc testutil.Accumulator
acc.SetPrecision(time.Second)
plugin.Push(&acc)
expected := []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{
"cpu": "cpu0",
},
map[string]interface{}{
"time_idle": 23,
},
time.Unix(0, 1),
),
testutil.MustMetric(
"cpu",
map[string]string{
"cpu": "cpu0",
},
map[string]interface{}{
"time_guest": 42,
},
time.Unix(0, 2),
),
}
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics())
}
func TestWithRounding(t *testing.T) {
plugin := &Merge{RoundTimestamp: config.Duration(10 * time.Nanosecond)}
require.NoError(t, plugin.Init())
plugin.Add(
testutil.MustMetric(
"cpu",
map[string]string{
"cpu": "cpu0",
},
map[string]interface{}{
"time_idle": 23,
},
time.Unix(0, 1),
),
)
plugin.Add(
testutil.MustMetric(
"cpu",
map[string]string{
"cpu": "cpu0",
},
map[string]interface{}{
"time_guest": 42,
},
time.Unix(0, 2),
),
)
var acc testutil.Accumulator
acc.SetPrecision(time.Second)
plugin.Push(&acc)
expected := []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{
"cpu": "cpu0",
},
map[string]interface{}{
"time_idle": 23,
"time_guest": 42,
},
time.Unix(0, 0),
),
}
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics())
}
func TestReset(t *testing.T) {
plugin := &Merge{}
require.NoError(t, plugin.Init())
plugin.Add(
testutil.MustMetric(
"cpu",
map[string]string{
"cpu": "cpu0",
},
map[string]interface{}{
"time_idle": 42,
},
time.Unix(0, 0),
),
)
var acc testutil.Accumulator
plugin.Push(&acc)
plugin.Reset()
plugin.Add(
testutil.MustMetric(
"cpu",
map[string]string{
"cpu": "cpu0",
},
map[string]interface{}{
"time_guest": 42,
},
time.Unix(0, 0),
),
)
plugin.Push(&acc)
expected := []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{
"cpu": "cpu0",
},
map[string]interface{}{
"time_idle": 42,
},
time.Unix(0, 0),
),
testutil.MustMetric(
"cpu",
map[string]string{
"cpu": "cpu0",
},
map[string]interface{}{
"time_guest": 42,
},
time.Unix(0, 0),
),
}
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics())
}
func BenchmarkMergeOne(b *testing.B) {
var merger Merge
require.NoError(b, merger.Init())
m := metric.New(
"mymetric",
map[string]string{
"host": "host.example.com",
"mykey": "myvalue",
"another key": "another value",
},
map[string]interface{}{
"f1": 1,
"f2": 2,
"f3": 3,
"f4": 4,
"f5": 5,
"f6": 6,
"f7": 7,
"f8": 8,
},
time.Now(),
)
var acc testutil.NopAccumulator
for n := 0; n < b.N; n++ {
merger.Reset()
merger.Add(m)
merger.Push(&acc)
}
}
func BenchmarkMergeTwo(b *testing.B) {
var merger Merge
require.NoError(b, merger.Init())
now := time.Now()
m1 := metric.New(
"mymetric",
map[string]string{
"host": "host.example.com",
"mykey": "myvalue",
"another key": "another value",
},
map[string]interface{}{
"f1": 1,
"f2": 2,
"f3": 3,
"f4": 4,
"f5": 5,
"f6": 6,
"f7": 7,
"f8": 8,
},
now,
)
m2 := metric.New(
"mymetric",
map[string]string{
"host": "host.example.com",
"mykey": "myvalue",
"another key": "another value",
},
map[string]interface{}{
"f8": 8,
"f9": 9,
"f10": 10,
"f11": 11,
"f12": 12,
"f13": 13,
"f14": 14,
"f15": 15,
"f16": 16,
},
now,
)
var acc testutil.NopAccumulator
for n := 0; n < b.N; n++ {
merger.Reset()
merger.Add(m1)
merger.Add(m2)
merger.Push(&acc)
}
}
@ -0,0 +1,15 @@
# Merge metrics into multifield metrics by series key
[[aggregators.merge]]
## General Aggregator Arguments:
## The period on which to flush & clear the aggregator.
# period = "30s"
## Precision to round the metric timestamp to
## This is useful for cases where metrics to merge arrive within a small
## interval and thus vary in timestamp. The timestamp of the resulting metric
## is also rounded.
# round_timestamp_to = "1ns"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = true
@ -0,0 +1,56 @@
# Minimum-Maximum Aggregator Plugin
This plugin aggregates the minimum and maximum values of each field it sees,
emitting the aggregate every `period` with field names suffixed by `_min`
and `_max` respectively.
⭐ Telegraf v1.1.0
🏷️ statistics
💻 all
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used to
modify metrics, tags, and fields or create aliases and configure ordering, etc.
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
## Configuration
```toml @sample.conf
# Keep the aggregate min/max of each metric passing through.
[[aggregators.minmax]]
## General Aggregator Arguments:
## The period on which to flush & clear the aggregator.
# period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
# drop_original = false
```
## Measurements & Fields
- measurement1
- field1_max
- field1_min
## Tags
No tags are applied by this aggregator.
## Example Output
```text
system,host=tars load1=1.72 1475583980000000000
system,host=tars load1=1.6 1475583990000000000
system,host=tars load1=1.66 1475584000000000000
system,host=tars load1=1.63 1475584010000000000
system,host=tars load1_max=1.72,load1_min=1.6 1475584010000000000
system,host=tars load1=1.46 1475584020000000000
system,host=tars load1=1.39 1475584030000000000
system,host=tars load1=1.41 1475584040000000000
system,host=tars load1_max=1.46,load1_min=1.39 1475584040000000000
```
@ -0,0 +1,114 @@
//go:generate ../../../tools/readme_config_includer/generator
package minmax
import (
_ "embed"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/aggregators"
)
//go:embed sample.conf
var sampleConfig string
type MinMax struct {
cache map[uint64]aggregate
}
func NewMinMax() telegraf.Aggregator {
mm := &MinMax{}
mm.Reset()
return mm
}
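// aggregate caches the series name, tags and the running min/max values of
// all numeric fields of a single series.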
type aggregate struct {
fields map[string]minmax
name string
tags map[string]string
}
type minmax struct {
min float64
max float64
}
func (*MinMax) SampleConfig() string {
return sampleConfig
}
func (m *MinMax) Add(in telegraf.Metric) {
id := in.HashID()
if _, ok := m.cache[id]; !ok {
// hit an uncached metric, create caches for first time:
a := aggregate{
name: in.Name(),
tags: in.Tags(),
fields: make(map[string]minmax),
}
for k, v := range in.Fields() {
if fv, ok := convert(v); ok {
a.fields[k] = minmax{
min: fv,
max: fv,
}
}
}
m.cache[id] = a
} else {
for k, v := range in.Fields() {
if fv, ok := convert(v); ok {
if _, ok := m.cache[id].fields[k]; !ok {
// hit an uncached field of a cached metric
m.cache[id].fields[k] = minmax{
min: fv,
max: fv,
}
continue
}
if fv < m.cache[id].fields[k].min {
tmp := m.cache[id].fields[k]
tmp.min = fv
m.cache[id].fields[k] = tmp
} else if fv > m.cache[id].fields[k].max {
tmp := m.cache[id].fields[k]
tmp.max = fv
m.cache[id].fields[k] = tmp
}
}
}
}
}
func (m *MinMax) Push(acc telegraf.Accumulator) {
for _, aggregate := range m.cache {
fields := make(map[string]interface{}, len(aggregate.fields))
for k, v := range aggregate.fields {
fields[k+"_min"] = v.min
fields[k+"_max"] = v.max
}
acc.AddFields(aggregate.name, fields, aggregate.tags)
}
}
func (m *MinMax) Reset() {
m.cache = make(map[uint64]aggregate)
}
func convert(in interface{}) (float64, bool) {
switch v := in.(type) {
case float64:
return v, true
case int64:
return float64(v), true
case uint64:
return float64(v), true
default:
return 0, false
}
}
func init() {
aggregators.Add("minmax", func() telegraf.Aggregator {
return NewMinMax()
})
}
@ -0,0 +1,167 @@
package minmax
import (
"testing"
"time"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/testutil"
)
var m1 = metric.New("m1",
map[string]string{"foo": "bar"},
map[string]interface{}{
"a": int64(1),
"b": int64(1),
"c": int64(1),
"d": int64(1),
"e": int64(1),
"f": float64(2),
"g": float64(2),
"h": float64(2),
"i": float64(2),
"j": float64(3),
},
time.Now(),
)
var m2 = metric.New("m1",
map[string]string{"foo": "bar"},
map[string]interface{}{
"a": int64(1),
"b": int64(3),
"c": int64(3),
"d": int64(3),
"e": int64(3),
"f": float64(1),
"g": float64(1),
"h": float64(1),
"i": float64(1),
"j": float64(1),
"k": float64(200),
"l": uint64(200),
"ignoreme": "string",
"andme": true,
},
time.Now(),
)
func BenchmarkApply(b *testing.B) {
minmax := NewMinMax()
for n := 0; n < b.N; n++ {
minmax.Add(m1)
minmax.Add(m2)
}
}
// Test two metrics getting added.
func TestMinMaxWithPeriod(t *testing.T) {
acc := testutil.Accumulator{}
minmax := NewMinMax()
minmax.Add(m1)
minmax.Add(m2)
minmax.Push(&acc)
expectedFields := map[string]interface{}{
"a_max": float64(1),
"a_min": float64(1),
"b_max": float64(3),
"b_min": float64(1),
"c_max": float64(3),
"c_min": float64(1),
"d_max": float64(3),
"d_min": float64(1),
"e_max": float64(3),
"e_min": float64(1),
"f_max": float64(2),
"f_min": float64(1),
"g_max": float64(2),
"g_min": float64(1),
"h_max": float64(2),
"h_min": float64(1),
"i_max": float64(2),
"i_min": float64(1),
"j_max": float64(3),
"j_min": float64(1),
"k_max": float64(200),
"k_min": float64(200),
"l_max": float64(200),
"l_min": float64(200),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test two metrics getting added with a push/reset in between (simulates
// getting added in different periods.)
func TestMinMaxDifferentPeriods(t *testing.T) {
acc := testutil.Accumulator{}
minmax := NewMinMax()
minmax.Add(m1)
minmax.Push(&acc)
expectedFields := map[string]interface{}{
"a_max": float64(1),
"a_min": float64(1),
"b_max": float64(1),
"b_min": float64(1),
"c_max": float64(1),
"c_min": float64(1),
"d_max": float64(1),
"d_min": float64(1),
"e_max": float64(1),
"e_min": float64(1),
"f_max": float64(2),
"f_min": float64(2),
"g_max": float64(2),
"g_min": float64(2),
"h_max": float64(2),
"h_min": float64(2),
"i_max": float64(2),
"i_min": float64(2),
"j_max": float64(3),
"j_min": float64(3),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
acc.ClearMetrics()
minmax.Reset()
minmax.Add(m2)
minmax.Push(&acc)
expectedFields = map[string]interface{}{
"a_max": float64(1),
"a_min": float64(1),
"b_max": float64(3),
"b_min": float64(3),
"c_max": float64(3),
"c_min": float64(3),
"d_max": float64(3),
"d_min": float64(3),
"e_max": float64(3),
"e_min": float64(3),
"f_max": float64(1),
"f_min": float64(1),
"g_max": float64(1),
"g_min": float64(1),
"h_max": float64(1),
"h_min": float64(1),
"i_max": float64(1),
"i_min": float64(1),
"j_max": float64(1),
"j_min": float64(1),
"k_max": float64(200),
"k_min": float64(200),
"l_max": float64(200),
"l_min": float64(200),
}
expectedTags = map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}

View file

@ -0,0 +1,9 @@
# Keep the aggregate min/max of each metric passing through.
[[aggregators.minmax]]
## General Aggregator Arguments:
## The period on which to flush & clear the aggregator.
# period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
# drop_original = false

View file

@ -0,0 +1,145 @@
# Quantile Aggregator Plugin
This plugin aggregates each numeric field per metric into the specified
quantiles and emits the quantiles every `period`. Different aggregation
algorithms are supported with varying accuracy and limitations.
⭐ Telegraf v1.18.0
🏷️ statistics
💻 all
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used to
modify metrics, tags, and fields or create aliases and configure ordering, etc.
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
## Configuration
```toml @sample.conf
# Keep the aggregate quantiles of each metric passing through.
[[aggregators.quantile]]
## General Aggregator Arguments:
## The period on which to flush & clear the aggregator.
# period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
# drop_original = false
## Quantiles to output in the range [0,1]
# quantiles = [0.25, 0.5, 0.75]
## Type of aggregation algorithm
## Supported are:
## "t-digest" -- approximation using centroids, can cope with large number of samples
## "exact R7" -- exact computation also used by Excel or NumPy (Hyndman & Fan 1996 R7)
## "exact R8" -- exact computation (Hyndman & Fan 1996 R8)
## NOTE: Do not use "exact" algorithms with large number of samples
## to not impair performance or memory consumption!
# algorithm = "t-digest"
## Compression for approximation (t-digest). The value needs to be
## greater than or equal to 1.0. Smaller values will result in better
## performance but less accuracy.
# compression = 100.0
```
## Algorithm types
### t-digest
Proposed by [Dunning & Ertl (2019)][tdigest_paper], this algorithm uses a
special data structure to cluster incoming data. These clusters are later
used to approximate the requested quantiles. The accuracy of the
approximation can be controlled by the `compression` setting, where smaller
values result in better performance but less accuracy.
Due to its incremental nature, this algorithm can handle large
numbers of samples efficiently. It is recommended for applications
where exact quantile calculation isn't required.
For implementation details see the underlying [golang library][tdigest_lib].
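As a minimal sketch of how the underlying digest behaves (mirroring the
`tdigest.New(tdigest.Compression(...))` call in this plugin's
`algorithms.go`; the standalone `main` wrapper is purely illustrative),
samples can be added incrementally and quantiles queried at any point:

```go
package main

import (
	"fmt"

	"github.com/caio/go-tdigest"
)

func main() {
	// Compression 100 matches the plugin's default setting.
	td, err := tdigest.New(tdigest.Compression(100))
	if err != nil {
		panic(err)
	}

	// Feed in samples; the digest only keeps a compressed set of centroids,
	// so memory stays bounded regardless of the number of samples.
	for i := 0; i < 10000; i++ {
		if err := td.Add(float64(i)); err != nil {
			panic(err)
		}
	}

	// Query the quantiles used by the plugin's default configuration.
	for _, q := range []float64{0.25, 0.5, 0.75} {
		fmt.Printf("q=%.2f -> %.1f\n", q, td.Quantile(q))
	}
}
```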
### exact R7 and R8
These algorithms compute quantiles as described in [Hyndman & Fan
(1996)][hyndman_fan]. The R7 variant is used in Excel and NumPy. The R8
variant is recommended by Hyndman & Fan because its results are independent
of the underlying sample distribution.
These algorithms save all data for the aggregation `period`. They require a lot
of memory when used with a large number of series or a large number of
samples. They are slower than the `t-digest` algorithm and should only be
used with a small number of samples and series.
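Both variants reduce to a linear interpolation between neighboring order
statistics. With a sorted sample `x_0 <= ... <= x_(N-1)`, the zero-based
interpolation index computed by this plugin's `algorithms.go` is:

```latex
n_{\mathrm{R7}} = q\,(N - 1), \qquad
n_{\mathrm{R8}} = q\left(N + \tfrac{1}{3}\right) - \tfrac{2}{3}

j = \lfloor n \rfloor, \quad \gamma = n - j, \qquad
Q(q) = x_j + \gamma\,(x_{j+1} - x_j)
```

At the boundaries the result is clamped to `x_0` and `x_(N-1)`.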
## Benchmark (linux/amd64)
The benchmark was performed by adding 100 metrics with six numeric
(and two non-numeric) fields to the aggregator and then deriving the
aggregation result.
| algorithm | # quantiles | avg. runtime |
| :------------ | -------------:| -------------:|
| t-digest | 3 | 376372 ns/op |
| exact R7 | 3 | 9782946 ns/op |
| exact R8 | 3 | 9158205 ns/op |
| t-digest | 100 | 899204 ns/op |
| exact R7 | 100 | 7868816 ns/op |
| exact R8 | 100 | 8099612 ns/op |
## Measurements
Measurement names are passed through this aggregator.
### Fields
For all numeric fields (int32/64, uint32/64 and float32/64) new *quantile*
fields are aggregated in the form `<fieldname>_<quantile*100>`, with the
suffix zero-padded to three digits (e.g. `_025` for the 0.25 quantile). Other
field types (e.g. boolean, string) are ignored and dropped from the output.
For example passing in the following metric as *input*:
- somemetric
- average_response_ms (float64)
- minimum_response_ms (float64)
- maximum_response_ms (float64)
- status (string)
- ok (boolean)
and using the default setting for `quantiles`, you get the following *output*:
- somemetric
- average_response_ms_025 (float64)
- average_response_ms_050 (float64)
- average_response_ms_075 (float64)
- minimum_response_ms_025 (float64)
- minimum_response_ms_050 (float64)
- minimum_response_ms_075 (float64)
- maximum_response_ms_025 (float64)
- maximum_response_ms_050 (float64)
- maximum_response_ms_075 (float64)
The `status` and `ok` fields are dropped because they are not numeric. Note
that the number of resulting fields scales with the number of `quantiles`
specified.
### Tags
Tags are passed through to the output by this aggregator.
### Example Output
```text
cpu,cpu=cpu-total,host=Hugin usage_user=10.814851731872487,usage_system=2.1679541490155687,usage_irq=1.046598554697342,usage_steal=0,usage_guest_nice=0,usage_idle=85.79616247197244,usage_nice=0,usage_iowait=0,usage_softirq=0.1744330924495688,usage_guest=0 1608288360000000000
cpu,cpu=cpu-total,host=Hugin usage_guest=0,usage_system=2.1601016518428664,usage_iowait=0.02541296060990694,usage_irq=1.0165184243964942,usage_softirq=0.1778907242693666,usage_steal=0,usage_guest_nice=0,usage_user=9.275730622616953,usage_idle=87.34434561626493,usage_nice=0 1608288370000000000
cpu,cpu=cpu-total,host=Hugin usage_idle=85.78199052131747,usage_nice=0,usage_irq=1.0476428036915637,usage_guest=0,usage_guest_nice=0,usage_system=1.995510102269591,usage_iowait=0,usage_softirq=0.1995510102269662,usage_steal=0,usage_user=10.975305562484735 1608288380000000000
cpu,cpu=cpu-total,host=Hugin usage_guest_nice_075=0,usage_user_050=10.814851731872487,usage_guest_075=0,usage_steal_025=0,usage_irq_025=1.031558489546918,usage_irq_075=1.0471206791944527,usage_iowait_025=0,usage_guest_050=0,usage_guest_nice_050=0,usage_nice_075=0,usage_iowait_050=0,usage_system_050=2.1601016518428664,usage_irq_050=1.046598554697342,usage_guest_nice_025=0,usage_idle_050=85.79616247197244,usage_softirq_075=0.1887208672481664,usage_steal_075=0,usage_system_025=2.0778058770562287,usage_system_075=2.1640279004292173,usage_softirq_050=0.1778907242693666,usage_nice_050=0,usage_iowait_075=0.01270648030495347,usage_user_075=10.895078647178611,usage_nice_025=0,usage_steal_050=0,usage_user_025=10.04529117724472,usage_idle_025=85.78907649664495,usage_idle_075=86.57025404411868,usage_softirq_025=0.1761619083594677,usage_guest_025=0 1608288390000000000
```
[tdigest_paper]: https://arxiv.org/abs/1902.04023
[tdigest_lib]: https://github.com/caio/go-tdigest
[hyndman_fan]: http://www.maths.usyd.edu.au/u/UG/SM/STAT3022/r/current/Misc/Sample%20Quantiles%20in%20Statistical%20Packages.pdf

View file

@ -0,0 +1,108 @@
package quantile
import (
"math"
"sort"
"github.com/caio/go-tdigest"
)
type algorithm interface {
Add(value float64) error
Quantile(q float64) float64
}
func newTDigest(compression float64) (algorithm, error) {
return tdigest.New(tdigest.Compression(compression))
}
type exactAlgorithmR7 struct {
xs []float64
sorted bool
}
func newExactR7(_ float64) (algorithm, error) {
return &exactAlgorithmR7{xs: make([]float64, 0, 100), sorted: false}, nil
}
func (e *exactAlgorithmR7) Add(value float64) error {
e.xs = append(e.xs, value)
e.sorted = false
return nil
}
func (e *exactAlgorithmR7) Quantile(q float64) float64 {
size := len(e.xs)
// No information
if size == 0 {
return math.NaN()
}
// Sort the array if necessary
if !e.sorted {
sort.Float64s(e.xs)
e.sorted = true
}
// Get the quantile index and the fraction to the neighbor
// Hyndman & Fan; Sample Quantiles in Statistical Packages; The American Statistician vol 50; pp 361-365; 1996 -- R7
// Same as Excel and NumPy.
n := q * (float64(size) - 1)
i, gamma := math.Modf(n)
j := int(i)
if j < 0 {
return e.xs[0]
}
// Clamp at the upper end; this also guards the e.xs[j+1] access below
if j >= size-1 {
return e.xs[size-1]
}
// Linear interpolation
return e.xs[j] + gamma*(e.xs[j+1]-e.xs[j])
}
type exactAlgorithmR8 struct {
xs []float64
sorted bool
}
func newExactR8(_ float64) (algorithm, error) {
return &exactAlgorithmR8{xs: make([]float64, 0, 100), sorted: false}, nil
}
func (e *exactAlgorithmR8) Add(value float64) error {
e.xs = append(e.xs, value)
e.sorted = false
return nil
}
func (e *exactAlgorithmR8) Quantile(q float64) float64 {
size := len(e.xs)
// No information
if size == 0 {
return math.NaN()
}
// Sort the array if necessary
if !e.sorted {
sort.Float64s(e.xs)
e.sorted = true
}
// Get the quantile index and the fraction to the neighbor
// Hyndman & Fan; Sample Quantiles in Statistical Packages; The American Statistician vol 50; pp 361-365; 1996 -- R8
n := q*(float64(size)+1.0/3.0) - (2.0 / 3.0) // Indices are zero-based here but one-based in the paper
i, gamma := math.Modf(n)
j := int(i)
if j < 0 {
return e.xs[0]
}
// Clamp at the upper end; this also guards the e.xs[j+1] access below
if j >= size-1 {
return e.xs[size-1]
}
// Linear interpolation
return e.xs[j] + gamma*(e.xs[j+1]-e.xs[j])
}

View file

@ -0,0 +1,149 @@
//go:generate ../../../tools/readme_config_includer/generator
package quantile
import (
_ "embed"
"fmt"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/aggregators"
)
//go:embed sample.conf
var sampleConfig string
type Quantile struct {
Quantiles []float64 `toml:"quantiles"`
Compression float64 `toml:"compression"`
AlgorithmType string `toml:"algorithm"`
newAlgorithm newAlgorithmFunc
cache map[uint64]aggregate
suffixes []string
Log telegraf.Logger `toml:"-"`
}
type aggregate struct {
name string
fields map[string]algorithm
tags map[string]string
}
type newAlgorithmFunc func(compression float64) (algorithm, error)
func (*Quantile) SampleConfig() string {
return sampleConfig
}
func (q *Quantile) Add(in telegraf.Metric) {
id := in.HashID()
if cached, ok := q.cache[id]; ok {
fields := in.Fields()
for k, algo := range cached.fields {
if field, ok := fields[k]; ok {
if v, isconvertible := convert(field); isconvertible {
err := algo.Add(v)
if err != nil {
q.Log.Errorf("adding cached field %s: %v", k, err)
}
}
}
}
return
}
// New metric, setup cache and init algorithm
a := aggregate{
name: in.Name(),
tags: in.Tags(),
fields: make(map[string]algorithm),
}
for k, field := range in.Fields() {
if v, isconvertible := convert(field); isconvertible {
algo, err := q.newAlgorithm(q.Compression)
if err != nil {
q.Log.Errorf("generating algorithm %s: %v", k, err)
continue
}
err = algo.Add(v)
if err != nil {
q.Log.Errorf("adding field %s: %v", k, err)
}
a.fields[k] = algo
}
}
q.cache[id] = a
}
func (q *Quantile) Push(acc telegraf.Accumulator) {
for _, aggregate := range q.cache {
fields := make(map[string]interface{}, len(aggregate.fields)*len(q.Quantiles))
for k, algo := range aggregate.fields {
for i, qtl := range q.Quantiles {
fields[k+q.suffixes[i]] = algo.Quantile(qtl)
}
}
acc.AddFields(aggregate.name, fields, aggregate.tags)
}
}
func (q *Quantile) Reset() {
q.cache = make(map[uint64]aggregate)
}
func convert(in interface{}) (float64, bool) {
switch v := in.(type) {
case float64:
return v, true
case int64:
return float64(v), true
case uint64:
return float64(v), true
default:
return 0, false
}
}
func (q *Quantile) Init() error {
switch q.AlgorithmType {
case "t-digest", "":
q.newAlgorithm = newTDigest
case "exact R7":
q.newAlgorithm = newExactR7
case "exact R8":
q.newAlgorithm = newExactR8
default:
return fmt.Errorf("unknown algorithm type %q", q.AlgorithmType)
}
if _, err := q.newAlgorithm(q.Compression); err != nil {
return fmt.Errorf("cannot create %q algorithm: %w", q.AlgorithmType, err)
}
if len(q.Quantiles) == 0 {
q.Quantiles = []float64{0.25, 0.5, 0.75}
}
duplicates := make(map[float64]bool)
q.suffixes = make([]string, 0, len(q.Quantiles))
for _, qtl := range q.Quantiles {
if qtl < 0.0 || qtl > 1.0 {
return fmt.Errorf("quantile %v out of range", qtl)
}
if _, found := duplicates[qtl]; found {
return fmt.Errorf("duplicate quantile %v", qtl)
}
duplicates[qtl] = true
q.suffixes = append(q.suffixes, fmt.Sprintf("_%03d", int(qtl*100.0)))
}
q.Reset()
return nil
}
func init() {
aggregators.Add("quantile", func() telegraf.Aggregator {
return &Quantile{Compression: 100}
})
}

View file

@ -0,0 +1,671 @@
package quantile
import (
"math/rand"
"testing"
"time"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
)
func TestConfigInvalidAlgorithm(t *testing.T) {
q := Quantile{AlgorithmType: "a strange one"}
err := q.Init()
require.Error(t, err)
require.Contains(t, err.Error(), "unknown algorithm type")
}
func TestConfigInvalidCompression(t *testing.T) {
q := Quantile{Compression: 0, AlgorithmType: "t-digest"}
err := q.Init()
require.Error(t, err)
require.Contains(t, err.Error(), "cannot create \"t-digest\" algorithm")
}
func TestConfigInvalidQuantiles(t *testing.T) {
q := Quantile{Compression: 100, Quantiles: []float64{-0.5}}
err := q.Init()
require.Error(t, err)
require.Contains(t, err.Error(), "quantile -0.5 out of range")
q = Quantile{Compression: 100, Quantiles: []float64{1.5}}
err = q.Init()
require.Error(t, err)
require.Contains(t, err.Error(), "quantile 1.5 out of range")
q = Quantile{Compression: 100, Quantiles: []float64{0.1, 0.2, 0.3, 0.1}}
err = q.Init()
require.Error(t, err)
require.Contains(t, err.Error(), "duplicate quantile")
}
func TestSingleMetricTDigest(t *testing.T) {
acc := testutil.Accumulator{}
q := Quantile{
Compression: 100,
Log: testutil.Logger{},
}
err := q.Init()
require.NoError(t, err)
expected := []telegraf.Metric{
testutil.MustMetric(
"test",
map[string]string{"foo": "bar"},
map[string]interface{}{
"a_025": 24.75,
"a_050": 49.50,
"a_075": 74.25,
"b_025": 24.75,
"b_050": 49.50,
"b_075": 74.25,
"c_025": 24.75,
"c_050": 49.50,
"c_075": 74.25,
"d_025": 24.75,
"d_050": 49.50,
"d_075": 74.25,
"e_025": 24.75,
"e_050": 49.50,
"e_075": 74.25,
"f_025": 24.75,
"f_050": 49.50,
"f_075": 74.25,
"g_025": 0.2475,
"g_050": 0.4950,
"g_075": 0.7425,
},
time.Now(),
),
}
metrics := make([]telegraf.Metric, 0, 100)
for i := 0; i < 100; i++ {
metrics = append(metrics, testutil.MustMetric(
"test",
map[string]string{"foo": "bar"},
map[string]interface{}{
"a": int32(i),
"b": int64(i),
"c": uint32(i),
"d": uint64(i),
"e": float32(i),
"f": float64(i),
"g": float64(i) / 100.0,
"x1": "string",
"x2": true,
},
time.Now(),
))
}
for _, m := range metrics {
q.Add(m)
}
q.Push(&acc)
epsilon := cmpopts.EquateApprox(0, 1e-3)
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime(), epsilon)
}
func TestMultipleMetricsTDigest(t *testing.T) {
acc := testutil.Accumulator{}
q := Quantile{
Compression: 100,
Log: testutil.Logger{},
}
err := q.Init()
require.NoError(t, err)
expected := []telegraf.Metric{
testutil.MustMetric(
"test",
map[string]string{"series": "foo"},
map[string]interface{}{
"a_025": 24.75, "a_050": 49.50, "a_075": 74.25,
"b_025": 24.75, "b_050": 49.50, "b_075": 74.25,
},
time.Now(),
),
testutil.MustMetric(
"test",
map[string]string{"series": "bar"},
map[string]interface{}{
"a_025": 49.50, "a_050": 99.00, "a_075": 148.50,
"b_025": 49.50, "b_050": 99.00, "b_075": 148.50,
},
time.Now(),
),
}
metricsA := make([]telegraf.Metric, 0, 100)
metricsB := make([]telegraf.Metric, 0, 100)
for i := 0; i < 100; i++ {
metricsA = append(metricsA, testutil.MustMetric(
"test",
map[string]string{"series": "foo"},
map[string]interface{}{"a": int64(i), "b": float64(i), "x1": "string", "x2": true},
time.Now(),
))
metricsB = append(metricsB, testutil.MustMetric(
"test",
map[string]string{"series": "bar"},
map[string]interface{}{"a": int64(2 * i), "b": float64(2 * i), "x1": "string", "x2": true},
time.Now(),
))
}
for _, m := range metricsA {
q.Add(m)
}
for _, m := range metricsB {
q.Add(m)
}
q.Push(&acc)
epsilon := cmpopts.EquateApprox(0, 1e-3)
sort := testutil.SortMetrics()
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime(), epsilon, sort)
}
func TestSingleMetricExactR7(t *testing.T) {
acc := testutil.Accumulator{}
q := Quantile{
AlgorithmType: "exact R7",
Log: testutil.Logger{},
}
err := q.Init()
require.NoError(t, err)
expected := []telegraf.Metric{
testutil.MustMetric(
"test",
map[string]string{"foo": "bar"},
map[string]interface{}{
"a_025": 24.75,
"a_050": 49.50,
"a_075": 74.25,
"b_025": 24.75,
"b_050": 49.50,
"b_075": 74.25,
"c_025": 24.75,
"c_050": 49.50,
"c_075": 74.25,
"d_025": 24.75,
"d_050": 49.50,
"d_075": 74.25,
"e_025": 24.75,
"e_050": 49.50,
"e_075": 74.25,
"f_025": 24.75,
"f_050": 49.50,
"f_075": 74.25,
"g_025": 0.2475,
"g_050": 0.4950,
"g_075": 0.7425,
},
time.Now(),
),
}
metrics := make([]telegraf.Metric, 0, 100)
for i := 0; i < 100; i++ {
metrics = append(metrics, testutil.MustMetric(
"test",
map[string]string{"foo": "bar"},
map[string]interface{}{
"a": int32(i),
"b": int64(i),
"c": uint32(i),
"d": uint64(i),
"e": float32(i),
"f": float64(i),
"g": float64(i) / 100.0,
"x1": "string",
"x2": true,
},
time.Now(),
))
}
for _, m := range metrics {
q.Add(m)
}
q.Push(&acc)
epsilon := cmpopts.EquateApprox(0, 1e-3)
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime(), epsilon)
}
func TestMultipleMetricsExactR7(t *testing.T) {
acc := testutil.Accumulator{}
q := Quantile{
AlgorithmType: "exact R7",
Log: testutil.Logger{},
}
err := q.Init()
require.NoError(t, err)
expected := []telegraf.Metric{
testutil.MustMetric(
"test",
map[string]string{"series": "foo"},
map[string]interface{}{
"a_025": 24.75, "a_050": 49.50, "a_075": 74.25,
"b_025": 24.75, "b_050": 49.50, "b_075": 74.25,
},
time.Now(),
),
testutil.MustMetric(
"test",
map[string]string{"series": "bar"},
map[string]interface{}{
"a_025": 49.50, "a_050": 99.00, "a_075": 148.50,
"b_025": 49.50, "b_050": 99.00, "b_075": 148.50,
},
time.Now(),
),
}
metricsA := make([]telegraf.Metric, 0, 100)
metricsB := make([]telegraf.Metric, 0, 100)
for i := 0; i < 100; i++ {
metricsA = append(metricsA, testutil.MustMetric(
"test",
map[string]string{"series": "foo"},
map[string]interface{}{"a": int64(i), "b": float64(i), "x1": "string", "x2": true},
time.Now(),
))
metricsB = append(metricsB, testutil.MustMetric(
"test",
map[string]string{"series": "bar"},
map[string]interface{}{"a": int64(2 * i), "b": float64(2 * i), "x1": "string", "x2": true},
time.Now(),
))
}
for _, m := range metricsA {
q.Add(m)
}
for _, m := range metricsB {
q.Add(m)
}
q.Push(&acc)
epsilon := cmpopts.EquateApprox(0, 1e-3)
sort := testutil.SortMetrics()
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime(), epsilon, sort)
}
func TestSingleMetricExactR8(t *testing.T) {
acc := testutil.Accumulator{}
q := Quantile{
AlgorithmType: "exact R8",
Log: testutil.Logger{},
}
err := q.Init()
require.NoError(t, err)
expected := []telegraf.Metric{
testutil.MustMetric(
"test",
map[string]string{"foo": "bar"},
map[string]interface{}{
"a_025": 24.417,
"a_050": 49.500,
"a_075": 74.583,
"b_025": 24.417,
"b_050": 49.500,
"b_075": 74.583,
"c_025": 24.417,
"c_050": 49.500,
"c_075": 74.583,
"d_025": 24.417,
"d_050": 49.500,
"d_075": 74.583,
"e_025": 24.417,
"e_050": 49.500,
"e_075": 74.583,
"f_025": 24.417,
"f_050": 49.500,
"f_075": 74.583,
"g_025": 0.24417,
"g_050": 0.49500,
"g_075": 0.74583,
},
time.Now(),
),
}
metrics := make([]telegraf.Metric, 0, 100)
for i := 0; i < 100; i++ {
metrics = append(metrics, testutil.MustMetric(
"test",
map[string]string{"foo": "bar"},
map[string]interface{}{
"a": int32(i),
"b": int64(i),
"c": uint32(i),
"d": uint64(i),
"e": float32(i),
"f": float64(i),
"g": float64(i) / 100.0,
"x1": "string",
"x2": true,
},
time.Now(),
))
}
for _, m := range metrics {
q.Add(m)
}
q.Push(&acc)
epsilon := cmpopts.EquateApprox(0, 1e-3)
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime(), epsilon)
}
func TestMultipleMetricsExactR8(t *testing.T) {
acc := testutil.Accumulator{}
q := Quantile{
AlgorithmType: "exact R8",
Log: testutil.Logger{},
}
err := q.Init()
require.NoError(t, err)
expected := []telegraf.Metric{
testutil.MustMetric(
"test",
map[string]string{"series": "foo"},
map[string]interface{}{
"a_025": 24.417, "a_050": 49.500, "a_075": 74.583,
"b_025": 24.417, "b_050": 49.500, "b_075": 74.583,
},
time.Now(),
),
testutil.MustMetric(
"test",
map[string]string{"series": "bar"},
map[string]interface{}{
"a_025": 48.833, "a_050": 99.000, "a_075": 149.167,
"b_025": 48.833, "b_050": 99.000, "b_075": 149.167,
},
time.Now(),
),
}
metricsA := make([]telegraf.Metric, 0, 100)
metricsB := make([]telegraf.Metric, 0, 100)
for i := 0; i < 100; i++ {
metricsA = append(metricsA, testutil.MustMetric(
"test",
map[string]string{"series": "foo"},
map[string]interface{}{"a": int64(i), "b": float64(i), "x1": "string", "x2": true},
time.Now(),
))
metricsB = append(metricsB, testutil.MustMetric(
"test",
map[string]string{"series": "bar"},
map[string]interface{}{"a": int64(2 * i), "b": float64(2 * i), "x1": "string", "x2": true},
time.Now(),
))
}
for _, m := range metricsA {
q.Add(m)
}
for _, m := range metricsB {
q.Add(m)
}
q.Push(&acc)
epsilon := cmpopts.EquateApprox(0, 1e-3)
sort := testutil.SortMetrics()
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime(), epsilon, sort)
}
func BenchmarkDefaultTDigest(b *testing.B) {
metrics := make([]telegraf.Metric, 0, 100)
for i := 0; i < 100; i++ {
metrics = append(metrics, testutil.MustMetric(
"test",
map[string]string{"foo": "bar"},
map[string]interface{}{
"a": rand.Int31(),
"b": rand.Int63(),
"c": rand.Uint32(),
"d": rand.Uint64(),
"e": rand.Float32(),
"f": rand.Float64(),
"x1": "string",
"x2": true,
},
time.Now(),
))
}
q := Quantile{
Compression: 100,
Log: testutil.Logger{},
}
err := q.Init()
require.NoError(b, err)
acc := testutil.Accumulator{}
for n := 0; n < b.N; n++ {
for _, m := range metrics {
q.Add(m)
}
q.Push(&acc)
}
}
func BenchmarkDefaultTDigest100Q(b *testing.B) {
metrics := make([]telegraf.Metric, 0, 100)
for i := 0; i < 100; i++ {
metrics = append(metrics, testutil.MustMetric(
"test",
map[string]string{"foo": "bar"},
map[string]interface{}{
"a": rand.Int31(),
"b": rand.Int63(),
"c": rand.Uint32(),
"d": rand.Uint64(),
"e": rand.Float32(),
"f": rand.Float64(),
"x1": "string",
"x2": true,
},
time.Now(),
))
}
quantiles := make([]float64, 0, 100)
for i := 0; i < 100; i++ {
quantiles = append(quantiles, 0.01*float64(i))
}
q := Quantile{
Compression: 100,
Quantiles: quantiles,
Log: testutil.Logger{},
}
err := q.Init()
require.NoError(b, err)
acc := testutil.Accumulator{}
for n := 0; n < b.N; n++ {
for _, m := range metrics {
q.Add(m)
}
q.Push(&acc)
}
}
func BenchmarkDefaultExactR7(b *testing.B) {
metrics := make([]telegraf.Metric, 0, 100)
for i := 0; i < 100; i++ {
metrics = append(metrics, testutil.MustMetric(
"test",
map[string]string{"foo": "bar"},
map[string]interface{}{
"a": rand.Int31(),
"b": rand.Int63(),
"c": rand.Uint32(),
"d": rand.Uint64(),
"e": rand.Float32(),
"f": rand.Float64(),
"x1": "string",
"x2": true,
},
time.Now(),
))
}
q := Quantile{
AlgorithmType: "exact R7",
Log: testutil.Logger{},
}
err := q.Init()
require.NoError(b, err)
acc := testutil.Accumulator{}
for n := 0; n < b.N; n++ {
for _, m := range metrics {
q.Add(m)
}
q.Push(&acc)
}
}
func BenchmarkDefaultExactR7100Q(b *testing.B) {
metrics := make([]telegraf.Metric, 0, 100)
for i := 0; i < 100; i++ {
metrics = append(metrics, testutil.MustMetric(
"test",
map[string]string{"foo": "bar"},
map[string]interface{}{
"a": rand.Int31(),
"b": rand.Int63(),
"c": rand.Uint32(),
"d": rand.Uint64(),
"e": rand.Float32(),
"f": rand.Float64(),
"x1": "string",
"x2": true,
},
time.Now(),
))
}
quantiles := make([]float64, 0, 100)
for i := 0; i < 100; i++ {
quantiles = append(quantiles, 0.01*float64(i))
}
q := Quantile{
AlgorithmType: "exact R7",
Quantiles: quantiles,
Log: testutil.Logger{},
}
err := q.Init()
require.NoError(b, err)
acc := testutil.Accumulator{}
for n := 0; n < b.N; n++ {
for _, m := range metrics {
q.Add(m)
}
q.Push(&acc)
}
}
func BenchmarkDefaultExactR8(b *testing.B) {
metrics := make([]telegraf.Metric, 0, 100)
for i := 0; i < 100; i++ {
metrics = append(metrics, testutil.MustMetric(
"test",
map[string]string{"foo": "bar"},
map[string]interface{}{
"a": rand.Int31(),
"b": rand.Int63(),
"c": rand.Uint32(),
"d": rand.Uint64(),
"e": rand.Float32(),
"f": rand.Float64(),
"x1": "string",
"x2": true,
},
time.Now(),
))
}
q := Quantile{
AlgorithmType: "exact R8",
Log: testutil.Logger{},
}
err := q.Init()
require.NoError(b, err)
acc := testutil.Accumulator{}
for n := 0; n < b.N; n++ {
for _, m := range metrics {
q.Add(m)
}
q.Push(&acc)
}
}
func BenchmarkDefaultExactR8100Q(b *testing.B) {
metrics := make([]telegraf.Metric, 0, 100)
for i := 0; i < 100; i++ {
metrics = append(metrics, testutil.MustMetric(
"test",
map[string]string{"foo": "bar"},
map[string]interface{}{
"a": rand.Int31(),
"b": rand.Int63(),
"c": rand.Uint32(),
"d": rand.Uint64(),
"e": rand.Float32(),
"f": rand.Float64(),
"x1": "string",
"x2": true,
},
time.Now(),
))
}
quantiles := make([]float64, 0, 100)
for i := 0; i < 100; i++ {
quantiles = append(quantiles, 0.01*float64(i))
}
q := Quantile{
AlgorithmType: "exact R8",
Quantiles: quantiles,
Log: testutil.Logger{},
}
err := q.Init()
require.NoError(b, err)
acc := testutil.Accumulator{}
for n := 0; n < b.N; n++ {
for _, m := range metrics {
q.Add(m)
}
q.Push(&acc)
}
}

View file

@ -0,0 +1,26 @@
# Keep the aggregate quantiles of each metric passing through.
[[aggregators.quantile]]
## General Aggregator Arguments:
## The period on which to flush & clear the aggregator.
# period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
# drop_original = false
## Quantiles to output in the range [0,1]
# quantiles = [0.25, 0.5, 0.75]
## Type of aggregation algorithm
## Supported are:
## "t-digest" -- approximation using centroids, can cope with large number of samples
## "exact R7" -- exact computation also used by Excel or NumPy (Hyndman & Fan 1996 R7)
## "exact R8" -- exact computation (Hyndman & Fan 1996 R8)
## NOTE: Do not use "exact" algorithms with large number of samples
## to not impair performance or memory consumption!
# algorithm = "t-digest"
## Compression for approximation (t-digest). The value needs to be
## greater than or equal to 1.0. Smaller values will result in better
## performance but less accuracy.
# compression = 100.0

View file

@ -0,0 +1,11 @@
package aggregators
import "github.com/influxdata/telegraf"
type Creator func() telegraf.Aggregator
var Aggregators = make(map[string]Creator)
func Add(name string, creator Creator) {
Aggregators[name] = creator
}

View file

@ -0,0 +1,137 @@
# Starlark Aggregator Plugin
This plugin allows implementing a custom aggregator plugin via a
[Starlark][starlark] script.
The Starlark language is a dialect of Python and will be familiar to those who
have experience with the Python language. However, there are major
[differences](#python-differences). Existing Python code is unlikely to work
unmodified.
> [!NOTE]
> The execution environment is sandboxed, and it is not possible to access the
> local filesystem or perform network operations. This is by design, as
> Starlark is intended to be a configuration language.
The Starlark script used by this plugin needs to define the three methods of
an aggregator: `add`, `push` and `reset`.
The `add` method is called as soon as a new metric is passed to the plugin.
After each `period`, the `push` method is called to output the resulting
metrics, and finally the aggregation is reset using the `reset` method of the
Starlark script.
The Starlark functions can use the global variable `state` to keep aggregation
information such as added metrics.
More details on the syntax and available functions can be found in the
[Starlark specification][spec].
⭐ Telegraf v1.21.0
🏷️ transformation
💻 all
[starlark]: https://github.com/google/starlark-go
[spec]: https://github.com/google/starlark-go/blob/d1966c6b9fcd/doc/spec.md
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used to
modify metrics, tags, and fields or create aliases and configure ordering, etc.
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
## Configuration
```toml @sample.conf
# Aggregate metrics using a Starlark script
[[aggregators.starlark]]
## The Starlark source can be set as a string in this configuration file, or
## by referencing a file containing the script. Only one source or script
## should be set at once.
##
## Source of the Starlark script.
source = '''
state = {}
def add(metric):
state["last"] = metric
def push():
return state.get("last")
def reset():
state.clear()
'''
## File containing a Starlark script.
# script = "/usr/local/bin/myscript.star"
## The constants of the Starlark script.
# [aggregators.starlark.constants]
# max_size = 10
# threshold = 0.75
# default_name = "Julia"
# debug_mode = true
```
## Usage
The Starlark code should contain a function called `add` that takes a metric as
argument. The function will be called with each metric to add, and doesn't
return anything.
```python
def add(metric):
state["last"] = metric
```
The Starlark code should also contain a function called `push` that doesn't take
any argument. The function will be called to compute the aggregation, and
returns the metrics to push to the accumulator.
```python
def push():
return state.get("last")
```
The Starlark code should also contain a function called `reset` that doesn't
take any argument. The function will be called to reset the plugin, and doesn't
return anything.
```python
def reset():
state.clear()
```
For a list of available types and functions that can be used in the code, see
the [Starlark specification][spec].
## Python Differences
Refer to the section [Python
Differences](../../processors/starlark/README.md#python-differences) of the
documentation about the Starlark processor.
## Libraries available
Refer to the section [Libraries
available](../../processors/starlark/README.md#libraries-available) of the
documentation about the Starlark processor.
## Common Questions
Refer to the section [Common
Questions](../../processors/starlark/README.md#common-questions) of the
documentation about the Starlark processor.
## Examples
- [minmax](testdata/min_max.star)
- [merge](testdata/merge.star)
[All examples](testdata) are in the testdata folder.
Open a Pull Request to add any other useful Starlark examples.

View file

@ -0,0 +1,29 @@
# Aggregate metrics using a Starlark script
[[aggregators.starlark]]
## The Starlark source can be set as a string in this configuration file, or
## by referencing a file containing the script. Only one source or script
## should be set at once.
##
## Source of the Starlark script.
source = '''
state = {}
def add(metric):
state["last"] = metric
def push():
return state.get("last")
def reset():
state.clear()
'''
## File containing a Starlark script.
# script = "/usr/local/bin/myscript.star"
## The constants of the Starlark script.
# [aggregators.starlark.constants]
# max_size = 10
# threshold = 0.75
# default_name = "Julia"
# debug_mode = true

View file

@ -0,0 +1,114 @@
//go:generate ../../../tools/readme_config_includer/generator
package starlark
import (
_ "embed"
"go.starlark.net/starlark"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/aggregators"
common "github.com/influxdata/telegraf/plugins/common/starlark"
)
//go:embed sample.conf
var sampleConfig string
type Starlark struct {
common.Common
}
func (*Starlark) SampleConfig() string {
return sampleConfig
}
func (s *Starlark) Init() error {
// Execute source
err := s.Common.Init()
if err != nil {
return err
}
// The source should define an add function.
err = s.AddFunction("add", &common.Metric{})
if err != nil {
return err
}
// The source should define a push function.
err = s.AddFunction("push")
if err != nil {
return err
}
// The source should define a reset function.
err = s.AddFunction("reset")
if err != nil {
return err
}
return nil
}
func (s *Starlark) Add(metric telegraf.Metric) {
parameters, found := s.GetParameters("add")
if !found {
s.Log.Errorf("The parameters of the add function could not be found")
return
}
parameters[0].(*common.Metric).Wrap(metric)
_, err := s.Call("add")
if err != nil {
s.LogError(err)
}
}
func (s *Starlark) Push(acc telegraf.Accumulator) {
rv, err := s.Call("push")
if err != nil {
s.LogError(err)
acc.AddError(err)
return
}
switch rv := rv.(type) {
case *starlark.List:
iter := rv.Iterate()
defer iter.Done()
var v starlark.Value
for iter.Next(&v) {
switch v := v.(type) {
case *common.Metric:
m := v.Unwrap()
acc.AddMetric(m)
default:
s.Log.Errorf("Invalid type returned in list: %s", v.Type())
}
}
case *common.Metric:
m := rv.Unwrap()
acc.AddMetric(m)
case starlark.NoneType:
default:
s.Log.Errorf("Invalid type returned: %T", rv)
}
}
func (s *Starlark) Reset() {
_, err := s.Call("reset")
if err != nil {
s.LogError(err)
}
}
// init initializes starlark aggregator plugin
func init() {
aggregators.Add("starlark", func() telegraf.Aggregator {
return &Starlark{
Common: common.Common{
StarlarkLoadFunc: common.LoadFunc,
},
}
})
}

View file

@ -0,0 +1,434 @@
package starlark
import (
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
common "github.com/influxdata/telegraf/plugins/common/starlark"
"github.com/influxdata/telegraf/testutil"
)
var m1 = metric.New("m1",
map[string]string{"foo": "bar"},
map[string]interface{}{
"a": int64(1),
"b": int64(1),
"c": int64(1),
"d": int64(1),
"e": int64(1),
"f": int64(2),
"g": int64(2),
"h": int64(2),
"i": int64(2),
"j": int64(3),
},
time.Now(),
)
var m2 = metric.New("m1",
map[string]string{"foo": "bar"},
map[string]interface{}{
"a": int64(1),
"b": int64(3),
"c": int64(3),
"d": int64(3),
"e": int64(3),
"f": int64(1),
"g": int64(1),
"h": int64(1),
"i": int64(1),
"j": int64(1),
"k": int64(200),
"l": int64(200),
"ignoreme": "string",
"andme": true,
},
time.Now(),
)
func BenchmarkApply(b *testing.B) {
minmax, err := newMinMax()
require.NoError(b, err)
for n := 0; n < b.N; n++ {
minmax.Add(m1)
minmax.Add(m2)
}
}
// Test two metrics getting added.
func TestMinMaxWithPeriod(t *testing.T) {
acc := testutil.Accumulator{}
minmax, err := newMinMax()
require.NoError(t, err)
minmax.Add(m1)
minmax.Add(m2)
minmax.Push(&acc)
expectedFields := map[string]interface{}{
"a_max": int64(1),
"a_min": int64(1),
"b_max": int64(3),
"b_min": int64(1),
"c_max": int64(3),
"c_min": int64(1),
"d_max": int64(3),
"d_min": int64(1),
"e_max": int64(3),
"e_min": int64(1),
"f_max": int64(2),
"f_min": int64(1),
"g_max": int64(2),
"g_min": int64(1),
"h_max": int64(2),
"h_min": int64(1),
"i_max": int64(2),
"i_min": int64(1),
"j_max": int64(3),
"j_min": int64(1),
"k_max": int64(200),
"k_min": int64(200),
"l_max": int64(200),
"l_min": int64(200),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test two metrics getting added with a push/reset in between (simulates
// getting added in different periods.)
func TestMinMaxDifferentPeriods(t *testing.T) {
acc := testutil.Accumulator{}
minmax, err := newMinMax()
require.NoError(t, err)
minmax.Add(m1)
minmax.Push(&acc)
expectedFields := map[string]interface{}{
"a_max": int64(1),
"a_min": int64(1),
"b_max": int64(1),
"b_min": int64(1),
"c_max": int64(1),
"c_min": int64(1),
"d_max": int64(1),
"d_min": int64(1),
"e_max": int64(1),
"e_min": int64(1),
"f_max": int64(2),
"f_min": int64(2),
"g_max": int64(2),
"g_min": int64(2),
"h_max": int64(2),
"h_min": int64(2),
"i_max": int64(2),
"i_min": int64(2),
"j_max": int64(3),
"j_min": int64(3),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
acc.ClearMetrics()
minmax.Reset()
minmax.Add(m2)
minmax.Push(&acc)
expectedFields = map[string]interface{}{
"a_max": int64(1),
"a_min": int64(1),
"b_max": int64(3),
"b_min": int64(3),
"c_max": int64(3),
"c_min": int64(3),
"d_max": int64(3),
"d_min": int64(3),
"e_max": int64(3),
"e_min": int64(3),
"f_max": int64(1),
"f_min": int64(1),
"g_max": int64(1),
"g_min": int64(1),
"h_max": int64(1),
"h_min": int64(1),
"i_max": int64(1),
"i_min": int64(1),
"j_max": int64(1),
"j_min": int64(1),
"k_max": int64(200),
"k_min": int64(200),
"l_max": int64(200),
"l_min": int64(200),
}
expectedTags = map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
func newMinMax() (*Starlark, error) {
return newStarlarkFromScript("testdata/min_max.star")
}
func TestSimple(t *testing.T) {
plugin, err := newMerge()
require.NoError(t, err)
plugin.Add(
testutil.MustMetric(
"cpu",
map[string]string{
"cpu": "cpu0",
},
map[string]interface{}{
"time_idle": 42,
},
time.Unix(0, 0),
),
)
require.NoError(t, err)
plugin.Add(
testutil.MustMetric(
"cpu",
map[string]string{
"cpu": "cpu0",
},
map[string]interface{}{
"time_guest": 42,
},
time.Unix(0, 0),
),
)
require.NoError(t, err)
var acc testutil.Accumulator
plugin.Push(&acc)
expected := []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{
"cpu": "cpu0",
},
map[string]interface{}{
"time_idle": 42,
"time_guest": 42,
},
time.Unix(0, 0),
),
}
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics())
}
func TestNanosecondPrecision(t *testing.T) {
plugin, err := newMerge()
require.NoError(t, err)
plugin.Add(
testutil.MustMetric(
"cpu",
map[string]string{
"cpu": "cpu0",
},
map[string]interface{}{
"time_idle": 42,
},
time.Unix(0, 1),
),
)
require.NoError(t, err)
plugin.Add(
testutil.MustMetric(
"cpu",
map[string]string{
"cpu": "cpu0",
},
map[string]interface{}{
"time_guest": 42,
},
time.Unix(0, 1),
),
)
require.NoError(t, err)
var acc testutil.Accumulator
acc.SetPrecision(time.Second)
plugin.Push(&acc)
expected := []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{
"cpu": "cpu0",
},
map[string]interface{}{
"time_idle": 42,
"time_guest": 42,
},
time.Unix(0, 1),
),
}
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics())
}
func TestReset(t *testing.T) {
plugin, err := newMerge()
require.NoError(t, err)
plugin.Add(
testutil.MustMetric(
"cpu",
map[string]string{
"cpu": "cpu0",
},
map[string]interface{}{
"time_idle": 42,
},
time.Unix(0, 0),
),
)
require.NoError(t, err)
var acc testutil.Accumulator
plugin.Push(&acc)
plugin.Reset()
plugin.Add(
testutil.MustMetric(
"cpu",
map[string]string{
"cpu": "cpu0",
},
map[string]interface{}{
"time_guest": 42,
},
time.Unix(0, 0),
),
)
require.NoError(t, err)
plugin.Push(&acc)
expected := []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{
"cpu": "cpu0",
},
map[string]interface{}{
"time_idle": 42,
},
time.Unix(0, 0),
),
testutil.MustMetric(
"cpu",
map[string]string{
"cpu": "cpu0",
},
map[string]interface{}{
"time_guest": 42,
},
time.Unix(0, 0),
),
}
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics())
}
func newMerge() (*Starlark, error) {
return newStarlarkFromScript("testdata/merge.star")
}
func TestLastFromSource(t *testing.T) {
acc := testutil.Accumulator{}
plugin, err := newStarlarkFromSource(`
state = {}
def add(metric):
state["last"] = metric
def push():
return state.get("last")
def reset():
state.clear()
`)
require.NoError(t, err)
plugin.Add(
testutil.MustMetric(
"cpu",
map[string]string{
"cpu": "cpu0",
},
map[string]interface{}{
"time_idle": 42,
},
time.Unix(0, 0),
),
)
require.NoError(t, err)
plugin.Add(
testutil.MustMetric(
"cpu",
map[string]string{
"cpu": "cpu2",
},
map[string]interface{}{
"time_idle": 31,
},
time.Unix(0, 0),
),
)
require.NoError(t, err)
plugin.Push(&acc)
expectedFields := map[string]interface{}{
"time_idle": int64(31),
}
expectedTags := map[string]string{
"cpu": "cpu2",
}
acc.AssertContainsTaggedFields(t, "cpu", expectedFields, expectedTags)
plugin.Reset()
}
func newStarlarkFromSource(source string) (*Starlark, error) {
plugin := &Starlark{
Common: common.Common{
StarlarkLoadFunc: common.LoadFunc,
Log: testutil.Logger{},
Source: source,
},
}
err := plugin.Init()
if err != nil {
return nil, err
}
return plugin, nil
}
func newStarlarkFromScript(script string) (*Starlark, error) {
plugin := &Starlark{
Common: common.Common{
StarlarkLoadFunc: common.LoadFunc,
Log: testutil.Logger{},
Script: script,
},
}
err := plugin.Init()
if err != nil {
return nil, err
}
return plugin, nil
}

View file

@ -0,0 +1,31 @@
# Example of a merge aggregator implemented with a starlark script.
load('time.star', 'time')
state = {}
def add(metric):
metrics = state.get("metrics")
if metrics == None:
metrics = {}
state["metrics"] = metrics
state["ordered"] = []
gId = groupID(metric)
m = metrics.get(gId)
if m == None:
m = deepcopy(metric)
metrics[gId] = m
state["ordered"].append(m)
else:
for k, v in metric.fields.items():
m.fields[k] = v
def push():
return state.get("ordered")
def reset():
state.clear()
def groupID(metric):
key = metric.name + "-"
for k, v in metric.tags.items():
key = key + k + "-" + v + "-"
key = key + "-" + str(metric.time)
return hash(key)

View file

@ -0,0 +1,53 @@
# Example of a min_max aggregator implemented with a starlark script.
supported_types = ["int", "float"]
state = {}
def add(metric):
gId = groupID(metric)
aggregate = state.get(gId)
if aggregate == None:
aggregate = {
"name": metric.name,
"tags": metric.tags,
"fields": {}
}
for k, v in metric.fields.items():
if type(v) in supported_types:
aggregate["fields"][k] = {
"min": v,
"max": v,
}
state[gId] = aggregate
else:
for k, v in metric.fields.items():
if type(v) in supported_types:
min_max = aggregate["fields"].get(k)
if min_max == None:
aggregate["fields"][k] = {
"min": v,
"max": v,
}
elif v < min_max["min"]:
aggregate["fields"][k]["min"] = v
elif v > min_max["max"]:
aggregate["fields"][k]["max"] = v
def push():
metrics = []
for a in state:
fields = {}
for k in state[a]["fields"]:
fields[k + "_min"] = state[a]["fields"][k]["min"]
fields[k + "_max"] = state[a]["fields"][k]["max"]
m = Metric(state[a]["name"], state[a]["tags"], fields)
metrics.append(m)
return metrics
def reset():
state.clear()
def groupID(metric):
key = metric.name + "-"
for k, v in metric.tags.items():
key = key + k + "-" + v
return hash(key)

View file

@ -0,0 +1,90 @@
# Value Counter Aggregator Plugin
This plugin counts the occurrences of unique values in fields and emits the
counters once every `period`, with the field names being suffixed by the
unique value converted to a `string`.
> [!NOTE]
> The fields to be counted must be configured using the `fields` setting,
> otherwise no field will be counted and no metric is emitted.
This plugin is useful, for example, to count the occurrences of HTTP status
codes or other categorical values within the defined `period`.
> [!IMPORTANT]
> Counting fields with a high number of potential values may produce a
> significant number of new fields and result in increased memory usage.
> Take care to only count fields with a limited set of values.
⭐ Telegraf v1.8.0
🏷️ statistics
💻 all
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used to
modify metrics, tags, and fields or create aliases and configure ordering, etc.
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
## Configuration
```toml @sample.conf
# Count the occurrence of values in fields.
[[aggregators.valuecounter]]
## General Aggregator Arguments:
## The period on which to flush & clear the aggregator.
# period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
# drop_original = false
## The fields for which the values will be counted
fields = ["status"]
```
### Measurements & Fields
- measurement1
- field_value1
- field_value2
### Tags
No tags are applied by this aggregator.
## Example Output
Example for parsing an HTTP access log.
telegraf.conf:
```toml
[[inputs.logparser]]
files = ["/tmp/tst.log"]
[inputs.logparser.grok]
patterns = ['%{DATA:url:tag} %{NUMBER:response:string}']
measurement = "access"
[[aggregators.valuecounter]]
namepass = ["access"]
fields = ["response"]
```
/tmp/tst.log
```text
/some/path 200
/some/path 401
/some/path 200
```
The first three lines are the original metrics passed through (as
`drop_original` defaults to `false`); the final line is the aggregated
counter emitted after the `period`:

```text
access,url=/some/path,path=/tmp/tst.log,host=localhost.localdomain response="200" 1511948755991487011
access,url=/some/path,path=/tmp/tst.log,host=localhost.localdomain response="401" 1511948755991522282
access,url=/some/path,path=/tmp/tst.log,host=localhost.localdomain response="200" 1511948755991531697
access,path=/tmp/tst.log,host=localhost.localdomain,url=/some/path response_200=2i,response_401=1i 1511948761000000000
```

View file

@ -0,0 +1,12 @@
# Count the occurrence of values in fields.
[[aggregators.valuecounter]]
## General Aggregator Arguments:
## The period on which to flush & clear the aggregator.
# period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
# drop_original = false
## The fields for which the values will be counted
fields = ["status"]

View file

@ -0,0 +1,86 @@
//go:generate ../../../tools/readme_config_includer/generator
package valuecounter
import (
_ "embed"
"fmt"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/aggregators"
)
//go:embed sample.conf
var sampleConfig string
type aggregate struct {
name string
tags map[string]string
fieldCount map[string]int
}
// ValueCounter is an aggregation plugin that counts the occurrences of field values
type ValueCounter struct {
cache map[uint64]aggregate
Fields []string
}
// NewValueCounter creates a new aggregation plugin which counts the
// occurrences of field values and emits the counts.
func NewValueCounter() telegraf.Aggregator {
vc := &ValueCounter{}
vc.Reset()
return vc
}
func (*ValueCounter) SampleConfig() string {
return sampleConfig
}
// Add is run on every metric which passes the plugin
func (vc *ValueCounter) Add(in telegraf.Metric) {
id := in.HashID()
// Check if the cache already has an entry for this metric, if not create it
if _, ok := vc.cache[id]; !ok {
a := aggregate{
name: in.Name(),
tags: in.Tags(),
fieldCount: make(map[string]int),
}
vc.cache[id] = a
}
// Check if this metric has fields which we need to count, if so increment
// the count.
for fk, fv := range in.Fields() {
for _, cf := range vc.Fields {
if fk == cf {
fn := fmt.Sprintf("%v_%v", fk, fv)
vc.cache[id].fieldCount[fn]++
}
}
}
}
// Push emits the counters
func (vc *ValueCounter) Push(acc telegraf.Accumulator) {
for _, agg := range vc.cache {
fields := make(map[string]interface{}, len(agg.fieldCount))
for field, count := range agg.fieldCount {
fields[field] = count
}
acc.AddFields(agg.name, fields, agg.tags)
}
}
// Reset the cache, executed after each push
func (vc *ValueCounter) Reset() {
vc.cache = make(map[uint64]aggregate)
}
func init() {
aggregators.Add("valuecounter", func() telegraf.Aggregator {
return NewValueCounter()
})
}

View file

@ -0,0 +1,125 @@
package valuecounter
import (
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/testutil"
)
// Create a valuecounter with config
func NewTestValueCounter(fields []string) telegraf.Aggregator {
vc := &ValueCounter{
Fields: fields,
}
vc.Reset()
return vc
}
var m1 = metric.New("m1",
map[string]string{"foo": "bar"},
map[string]interface{}{
"status": 200,
"foobar": "bar",
},
time.Now(),
)
var m2 = metric.New("m1",
map[string]string{"foo": "bar"},
map[string]interface{}{
"status": "OK",
"ignoreme": "string",
"andme": true,
"boolfield": false,
},
time.Now(),
)
func BenchmarkApply(b *testing.B) {
vc := NewTestValueCounter([]string{"status"})
for n := 0; n < b.N; n++ {
vc.Add(m1)
vc.Add(m2)
}
}
// Test basic functionality
func TestBasic(t *testing.T) {
vc := NewTestValueCounter([]string{"status"})
acc := testutil.Accumulator{}
vc.Add(m1)
vc.Add(m2)
vc.Add(m1)
vc.Push(&acc)
expectedFields := map[string]interface{}{
"status_200": 2,
"status_OK": 1,
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test with multiple fields to count
func TestMultipleFields(t *testing.T) {
vc := NewTestValueCounter([]string{"status", "somefield", "boolfield"})
acc := testutil.Accumulator{}
vc.Add(m1)
vc.Add(m2)
vc.Add(m2)
vc.Add(m1)
vc.Push(&acc)
expectedFields := map[string]interface{}{
"status_200": 2,
"status_OK": 2,
"boolfield_false": 2,
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test with a reset between two runs
func TestWithReset(t *testing.T) {
vc := NewTestValueCounter([]string{"status"})
acc := testutil.Accumulator{}
vc.Add(m1)
vc.Add(m1)
vc.Add(m2)
vc.Push(&acc)
expectedFields := map[string]interface{}{
"status_200": 2,
"status_OK": 1,
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
acc.ClearMetrics()
vc.Reset()
vc.Add(m2)
vc.Add(m2)
vc.Add(m1)
vc.Push(&acc)
expectedFields = map[string]interface{}{
"status_200": 1,
"status_OK": 2,
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}