1
0
Fork 0

Adding upstream version 1.34.4.

Signed-off-by: Daniel Baumann <daniel@debian.org>
This commit is contained in:
Daniel Baumann 2025-05-24 07:26:29 +02:00
parent e393c3af3f
commit 4978089aab
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
4963 changed files with 677545 additions and 0 deletions

View file

@ -0,0 +1,74 @@
# Basic Statistics Aggregator Plugin
This plugin computes basic statistics such as counts, differences, minima,
maxima, mean values, non-negative differences etc. for a set of metrics and
emits these statistical values every `period`.
⭐ Telegraf v1.5.0
🏷️ statistics
💻 all
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used to
modify metrics, tags, and fields or create aliases and configure ordering, etc.
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
## Configuration
```toml @sample.conf
# Keep the aggregate basicstats of each metric passing through.
[[aggregators.basicstats]]
## The period on which to flush & clear the aggregator.
# period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
# drop_original = false
## Configures which basic stats to push as fields
# stats = ["count","min","max","mean","s2","stdev"]
```
- stats
- If not specified, then `count`, `min`, `max`, `mean`, `stdev`, and `s2` are
aggregated and pushed as fields. Other fields are not aggregated by default
to maintain backwards compatibility.
- If empty array, no stats are aggregated
## Measurements & Fields
- measurement1
- field1_count
- field1_diff (difference)
- field1_rate (rate per second)
- field1_max
- field1_min
- field1_mean
- field1_non_negative_diff (non-negative difference)
- field1_non_negative_rate (non-negative rate per second)
- field1_percent_change
- field1_sum
- field1_s2 (variance)
- field1_stdev (standard deviation)
- field1_interval (interval in nanoseconds)
- field1_last (last aggregated value)
- field1_first (first aggregated value)
## Tags
No tags are applied by this aggregator.
## Example Output
```text
system,host=tars load1=1 1475583980000000000
system,host=tars load1=1 1475583990000000000
system,host=tars load1_count=2,load1_diff=0,load1_rate=0,load1_max=1,load1_min=1,load1_mean=1,load1_sum=2,load1_s2=0,load1_stdev=0,load1_interval=10000000000i,load1_last=1,load1_first=1 1475584010000000000
system,host=tars load1=1 1475584020000000000
system,host=tars load1=3 1475584030000000000
system,host=tars load1_count=2,load1_diff=2,load1_rate=0.2,load1_max=3,load1_min=1,load1_mean=2,load1_sum=4,load1_s2=2,load1_stdev=1.414214,load1_interval=10000000000i,load1_last=3,load1_first=1 1475584010000000000
```

View file

@ -0,0 +1,325 @@
//go:generate ../../../tools/readme_config_includer/generator
package basicstats
import (
_ "embed"
"math"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/aggregators"
)
// sampleConfig holds the embedded sample configuration returned by
// (*BasicStats).SampleConfig. The //go:embed directive must remain
// immediately above the var declaration.
//
//go:embed sample.conf
var sampleConfig string
// BasicStats aggregates simple statistics (count, min, max, mean, variance,
// etc.) for every numeric field of the metrics passing through it and emits
// them once per period.
type BasicStats struct {
	Stats []string `toml:"stats"` // stat names to emit; nil selects the historical defaults
	Log   telegraf.Logger

	cache       map[uint64]aggregate // per-series running aggregates, keyed by metric HashID
	statsConfig *configuredStats     // parsed form of Stats, populated by Init
}

// configuredStats is the parsed set of enabled statistics.
type configuredStats struct {
	count           bool
	min             bool
	max             bool
	mean            bool
	variance        bool // emitted with the "_s2" field suffix
	stdev           bool
	sum             bool
	diff            bool
	nonNegativeDiff bool
	rate            bool
	nonNegativeRate bool
	percentChange   bool
	interval        bool
	last            bool
	first           bool
}
// NewBasicStats constructs a BasicStats aggregator with an empty series cache.
func NewBasicStats() *BasicStats {
	b := new(BasicStats)
	b.cache = make(map[uint64]aggregate)
	return b
}
// aggregate holds the running statistics for a single metric series.
type aggregate struct {
	fields map[string]basicstats // per-field running stats
	name   string                // metric name
	tags   map[string]string     // series tags
}

// basicstats is the per-field running state, updated on every Add. PREVIOUS
// and TIME are set once when the field is first seen and never updated, so
// diff/interval/rate compare the latest sample against the first one of the
// period.
type basicstats struct {
	count    float64
	min      float64
	max      float64
	sum      float64
	mean     float64
	diff     float64       // latest value minus first value of the period
	rate     float64       // diff divided by interval, per second
	interval time.Duration // time between first and latest sample
	last     float64
	first    float64
	M2       float64   // intermediate value for variance/stdev
	PREVIOUS float64   // intermediate value for diff
	TIME     time.Time // intermediate value for rate
}
// SampleConfig returns the embedded sample configuration for the plugin.
func (*BasicStats) SampleConfig() string {
	return sampleConfig
}
func (b *BasicStats) Add(in telegraf.Metric) {
id := in.HashID()
if _, ok := b.cache[id]; !ok {
// hit an uncached metric, create caches for first time:
a := aggregate{
name: in.Name(),
tags: in.Tags(),
fields: make(map[string]basicstats),
}
for _, field := range in.FieldList() {
if fv, ok := convert(field.Value); ok {
a.fields[field.Key] = basicstats{
count: 1,
min: fv,
max: fv,
mean: fv,
sum: fv,
diff: 0.0,
rate: 0.0,
last: fv,
first: fv,
M2: 0.0,
PREVIOUS: fv,
TIME: in.Time(),
}
}
}
b.cache[id] = a
} else {
for _, field := range in.FieldList() {
if fv, ok := convert(field.Value); ok {
if _, ok := b.cache[id].fields[field.Key]; !ok {
// hit an uncached field of a cached metric
b.cache[id].fields[field.Key] = basicstats{
count: 1,
min: fv,
max: fv,
mean: fv,
sum: fv,
diff: 0.0,
rate: 0.0,
interval: 0,
last: fv,
first: fv,
M2: 0.0,
PREVIOUS: fv,
TIME: in.Time(),
}
continue
}
tmp := b.cache[id].fields[field.Key]
// https://en.m.wikipedia.org/wiki/Algorithms_for_calculating_variance
// variable initialization
x := fv
mean := tmp.mean
m2 := tmp.M2
// counter compute
n := tmp.count + 1
tmp.count = n
// mean compute
delta := x - mean
mean = mean + delta/n
tmp.mean = mean
// variance/stdev compute
m2 = m2 + delta*(x-mean)
tmp.M2 = m2
// max/min compute
if fv < tmp.min {
tmp.min = fv
} else if fv > tmp.max {
tmp.max = fv
}
// sum compute
tmp.sum += fv
// diff compute
tmp.diff = fv - tmp.PREVIOUS
// interval compute
tmp.interval = in.Time().Sub(tmp.TIME)
// rate compute
if !in.Time().Equal(tmp.TIME) {
tmp.rate = tmp.diff / tmp.interval.Seconds()
}
// last compute
tmp.last = fv
// store final data
b.cache[id].fields[field.Key] = tmp
}
}
}
}
// Push emits one aggregated metric per cached series, with one output field
// per enabled stat and input field. Push only reads the cache; clearing it
// is Reset's job.
func (b *BasicStats) Push(acc telegraf.Accumulator) {
	for _, aggregate := range b.cache {
		fields := make(map[string]interface{})
		for k, v := range aggregate.fields {
			// Stats below are well-defined from a single sample.
			if b.statsConfig.count {
				fields[k+"_count"] = v.count
			}
			if b.statsConfig.min {
				fields[k+"_min"] = v.min
			}
			if b.statsConfig.max {
				fields[k+"_max"] = v.max
			}
			if b.statsConfig.mean {
				fields[k+"_mean"] = v.mean
			}
			if b.statsConfig.sum {
				fields[k+"_sum"] = v.sum
			}
			if b.statsConfig.last {
				fields[k+"_last"] = v.last
			}
			if b.statsConfig.first {
				fields[k+"_first"] = v.first
			}
			// v.count always >=1; the stats below need at least two samples.
			if v.count > 1 {
				// Sample variance (Bessel's correction: divide by n-1).
				variance := v.M2 / (v.count - 1)
				if b.statsConfig.variance {
					fields[k+"_s2"] = variance
				}
				if b.statsConfig.stdev {
					fields[k+"_stdev"] = math.Sqrt(variance)
				}
				if b.statsConfig.diff {
					fields[k+"_diff"] = v.diff
				}
				if b.statsConfig.nonNegativeDiff && v.diff >= 0 {
					fields[k+"_non_negative_diff"] = v.diff
				}
				if b.statsConfig.rate {
					fields[k+"_rate"] = v.rate
				}
				// NOTE(review): PREVIOUS is the period's first value; if it is
				// zero this division yields ±Inf/NaN — confirm intended.
				if b.statsConfig.percentChange {
					fields[k+"_percent_change"] = v.diff / v.PREVIOUS * 100
				}
				// diff >= 0 implies rate >= 0 since interval is non-negative.
				if b.statsConfig.nonNegativeRate && v.diff >= 0 {
					fields[k+"_non_negative_rate"] = v.rate
				}
				if b.statsConfig.interval {
					fields[k+"_interval"] = v.interval.Nanoseconds()
				}
			}
			// if count == 1 StdDev = infinite => so I won't send data
		}
		if len(fields) > 0 {
			acc.AddFields(aggregate.name, fields, aggregate.tags)
		}
	}
}
// parseStats converts the user-supplied Stats list into the internal
// configuredStats flag set, warning about (and skipping) unknown names.
func (b *BasicStats) parseStats() *configuredStats {
	parsed := &configuredStats{}
	for _, name := range b.Stats {
		switch name {
		case "count":
			parsed.count = true
		case "min":
			parsed.min = true
		case "max":
			parsed.max = true
		case "mean":
			parsed.mean = true
		case "s2", "variance":
			// "s2" is the documented name; "variance" is accepted as an
			// alias because the sample configuration advertises it.
			parsed.variance = true
		case "stdev":
			parsed.stdev = true
		case "sum":
			parsed.sum = true
		case "diff":
			parsed.diff = true
		case "non_negative_diff":
			parsed.nonNegativeDiff = true
		case "rate":
			parsed.rate = true
		case "non_negative_rate":
			parsed.nonNegativeRate = true
		case "percent_change":
			parsed.percentChange = true
		case "interval":
			parsed.interval = true
		case "last":
			parsed.last = true
		case "first":
			parsed.first = true
		default:
			b.Log.Warnf("Unrecognized basic stat %q, ignoring", name)
		}
	}
	return parsed
}
// initConfiguredStats populates statsConfig. A nil Stats slice selects the
// historical default set (count/min/max/mean/s2/stdev); any non-nil slice —
// including an empty one — is parsed verbatim.
func (b *BasicStats) initConfiguredStats() {
	if b.Stats != nil {
		b.statsConfig = b.parseStats()
		return
	}
	b.statsConfig = &configuredStats{
		count:    true,
		min:      true,
		max:      true,
		mean:     true,
		variance: true,
		stdev:    true,
	}
}
// Reset discards every aggregate accumulated during the current period.
func (b *BasicStats) Reset() {
	b.cache = map[uint64]aggregate{}
}
// convert coerces the supported numeric field types (float64, int64, uint64)
// to float64; the second return value reports whether the coercion succeeded.
func convert(in interface{}) (float64, bool) {
	switch v := in.(type) {
	case float64:
		return v, true
	case int64:
		return float64(v), true
	case uint64:
		return float64(v), true
	}
	return 0, false
}
// Init resolves the configured Stats list into the internal flag set.
// It never returns an error.
func (b *BasicStats) Init() error {
	b.initConfiguredStats()
	return nil
}
// init registers the aggregator under the name "basicstats".
func init() {
	aggregators.Add("basicstats", func() telegraf.Aggregator {
		return NewBasicStats()
	})
}

View file

@ -0,0 +1,826 @@
package basicstats
import (
"math"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/testutil"
)
// m1 and m2 are two samples of the same series ("m1", foo=bar), one
// millisecond apart. m2 adds fields e/f plus non-numeric fields
// (ignoreme/andme) that the aggregator must skip.
var m1 = metric.New("m1",
	map[string]string{"foo": "bar"},
	map[string]interface{}{
		"a": int64(1),
		"b": int64(1),
		"c": float64(2),
		"d": float64(2),
		"g": int64(3),
	},
	time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC),
)
var m2 = metric.New("m1",
	map[string]string{"foo": "bar"},
	map[string]interface{}{
		"a": int64(1),
		"b": int64(3),
		"c": float64(4),
		"d": float64(6),
		"e": float64(200),
		"f": uint64(200),
		"ignoreme": "string",
		"andme": true,
		"g": int64(1),
	},
	time.Date(2000, 1, 1, 0, 0, 0, 1e6, time.UTC),
)
// BenchmarkApply measures the cost of Add for a new and an updated sample.
func BenchmarkApply(b *testing.B) {
	minmax := NewBasicStats()
	minmax.Log = testutil.Logger{}
	minmax.initConfiguredStats()
	for n := 0; n < b.N; n++ {
		minmax.Add(m1)
		minmax.Add(m2)
	}
}
// Test two metrics getting added within one period; default stats only.
func TestBasicStatsWithPeriod(t *testing.T) {
	acc := testutil.Accumulator{}
	minmax := NewBasicStats()
	minmax.Log = testutil.Logger{}
	minmax.initConfiguredStats()
	minmax.Add(m1)
	minmax.Add(m2)
	minmax.Push(&acc)
	expectedFields := map[string]interface{}{
		"a_count": float64(2), // a
		"a_max": float64(1),
		"a_min": float64(1),
		"a_mean": float64(1),
		"a_stdev": float64(0),
		"a_s2": float64(0),
		"b_count": float64(2), // b
		"b_max": float64(3),
		"b_min": float64(1),
		"b_mean": float64(2),
		"b_s2": float64(2),
		"b_stdev": math.Sqrt(2),
		"c_count": float64(2), // c
		"c_max": float64(4),
		"c_min": float64(2),
		"c_mean": float64(3),
		"c_s2": float64(2),
		"c_stdev": math.Sqrt(2),
		"d_count": float64(2), // d
		"d_max": float64(6),
		"d_min": float64(2),
		"d_mean": float64(4),
		"d_s2": float64(8),
		"d_stdev": math.Sqrt(8),
		"e_count": float64(1), // e
		"e_max": float64(200),
		"e_min": float64(200),
		"e_mean": float64(200),
		"f_count": float64(1), // f
		"f_max": float64(200),
		"f_min": float64(200),
		"f_mean": float64(200),
		"g_count": float64(2), // g
		"g_max": float64(3),
		"g_min": float64(1),
		"g_mean": float64(2),
		"g_s2": float64(2),
		"g_stdev": math.Sqrt(2),
	}
	expectedTags := map[string]string{
		"foo": "bar",
	}
	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test two metrics getting added with a push/reset in between (simulates
// getting added in different periods.) Each period must aggregate
// independently after Reset.
func TestBasicStatsDifferentPeriods(t *testing.T) {
	acc := testutil.Accumulator{}
	minmax := NewBasicStats()
	minmax.Stats = []string{"count", "max", "min", "mean", "last", "first"}
	minmax.Log = testutil.Logger{}
	minmax.initConfiguredStats()
	minmax.Add(m1)
	minmax.Push(&acc)
	expectedFields := map[string]interface{}{
		"a_count": float64(1), // a
		"a_max": float64(1),
		"a_min": float64(1),
		"a_mean": float64(1),
		"a_last": float64(1),
		"a_first": float64(1),
		"b_count": float64(1), // b
		"b_max": float64(1),
		"b_min": float64(1),
		"b_mean": float64(1),
		"b_last": float64(1),
		"b_first": float64(1),
		"c_count": float64(1), // c
		"c_max": float64(2),
		"c_min": float64(2),
		"c_mean": float64(2),
		"c_last": float64(2),
		"c_first": float64(2),
		"d_count": float64(1), // d
		"d_max": float64(2),
		"d_min": float64(2),
		"d_mean": float64(2),
		"d_last": float64(2),
		"d_first": float64(2),
		"g_count": float64(1), // g
		"g_max": float64(3),
		"g_min": float64(3),
		"g_mean": float64(3),
		"g_last": float64(3),
		"g_first": float64(3),
	}
	expectedTags := map[string]string{
		"foo": "bar",
	}
	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
	// Second period: clear both accumulator and aggregator state.
	acc.ClearMetrics()
	minmax.Reset()
	minmax.Add(m2)
	minmax.Push(&acc)
	expectedFields = map[string]interface{}{
		"a_count": float64(1), // a
		"a_max": float64(1),
		"a_min": float64(1),
		"a_mean": float64(1),
		"a_last": float64(1),
		"a_first": float64(1),
		"b_count": float64(1), // b
		"b_max": float64(3),
		"b_min": float64(3),
		"b_mean": float64(3),
		"b_last": float64(3),
		"b_first": float64(3),
		"c_count": float64(1), // c
		"c_max": float64(4),
		"c_min": float64(4),
		"c_mean": float64(4),
		"c_last": float64(4),
		"c_first": float64(4),
		"d_count": float64(1), // d
		"d_max": float64(6),
		"d_min": float64(6),
		"d_mean": float64(6),
		"d_last": float64(6),
		"d_first": float64(6),
		"e_count": float64(1), // e
		"e_max": float64(200),
		"e_min": float64(200),
		"e_mean": float64(200),
		"e_last": float64(200),
		"e_first": float64(200),
		"f_count": float64(1), // f
		"f_max": float64(200),
		"f_min": float64(200),
		"f_mean": float64(200),
		"f_last": float64(200),
		"f_first": float64(200),
		"g_count": float64(1), // g
		"g_max": float64(1),
		"g_min": float64(1),
		"g_mean": float64(1),
		"g_last": float64(1),
		"g_first": float64(1),
	}
	expectedTags = map[string]string{
		"foo": "bar",
	}
	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test only aggregating count
func TestBasicStatsWithOnlyCount(t *testing.T) {
	aggregator := NewBasicStats()
	aggregator.Stats = []string{"count"}
	aggregator.Log = testutil.Logger{}
	aggregator.initConfiguredStats()
	aggregator.Add(m1)
	aggregator.Add(m2)
	acc := testutil.Accumulator{}
	aggregator.Push(&acc)
	expectedFields := map[string]interface{}{
		"a_count": float64(2),
		"b_count": float64(2),
		"c_count": float64(2),
		"d_count": float64(2),
		"e_count": float64(1),
		"f_count": float64(1),
		"g_count": float64(2),
	}
	expectedTags := map[string]string{
		"foo": "bar",
	}
	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}

// Test only aggregating minimum
func TestBasicStatsWithOnlyMin(t *testing.T) {
	aggregator := NewBasicStats()
	aggregator.Stats = []string{"min"}
	aggregator.Log = testutil.Logger{}
	aggregator.initConfiguredStats()
	aggregator.Add(m1)
	aggregator.Add(m2)
	acc := testutil.Accumulator{}
	aggregator.Push(&acc)
	expectedFields := map[string]interface{}{
		"a_min": float64(1),
		"b_min": float64(1),
		"c_min": float64(2),
		"d_min": float64(2),
		"e_min": float64(200),
		"f_min": float64(200),
		"g_min": float64(1),
	}
	expectedTags := map[string]string{
		"foo": "bar",
	}
	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}

// Test only aggregating maximum
func TestBasicStatsWithOnlyMax(t *testing.T) {
	aggregator := NewBasicStats()
	aggregator.Stats = []string{"max"}
	aggregator.Log = testutil.Logger{}
	aggregator.initConfiguredStats()
	aggregator.Add(m1)
	aggregator.Add(m2)
	acc := testutil.Accumulator{}
	aggregator.Push(&acc)
	expectedFields := map[string]interface{}{
		"a_max": float64(1),
		"b_max": float64(3),
		"c_max": float64(4),
		"d_max": float64(6),
		"e_max": float64(200),
		"f_max": float64(200),
		"g_max": float64(3),
	}
	expectedTags := map[string]string{
		"foo": "bar",
	}
	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}

// Test only aggregating mean
func TestBasicStatsWithOnlyMean(t *testing.T) {
	aggregator := NewBasicStats()
	aggregator.Stats = []string{"mean"}
	aggregator.Log = testutil.Logger{}
	aggregator.initConfiguredStats()
	aggregator.Add(m1)
	aggregator.Add(m2)
	acc := testutil.Accumulator{}
	aggregator.Push(&acc)
	expectedFields := map[string]interface{}{
		"a_mean": float64(1),
		"b_mean": float64(2),
		"c_mean": float64(3),
		"d_mean": float64(4),
		"e_mean": float64(200),
		"f_mean": float64(200),
		"g_mean": float64(2),
	}
	expectedTags := map[string]string{
		"foo": "bar",
	}
	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}

// Test only aggregating sum
func TestBasicStatsWithOnlySum(t *testing.T) {
	aggregator := NewBasicStats()
	aggregator.Stats = []string{"sum"}
	aggregator.Log = testutil.Logger{}
	aggregator.initConfiguredStats()
	aggregator.Add(m1)
	aggregator.Add(m2)
	acc := testutil.Accumulator{}
	aggregator.Push(&acc)
	expectedFields := map[string]interface{}{
		"a_sum": float64(2),
		"b_sum": float64(4),
		"c_sum": float64(6),
		"d_sum": float64(8),
		"e_sum": float64(200),
		"f_sum": float64(200),
		"g_sum": float64(4),
	}
	expectedTags := map[string]string{
		"foo": "bar",
	}
	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Verify that sum doesn't suffer from floating point errors. Early
// implementations of sum were calculated from mean and count, which
// e.g. summed "1, 1, 5, 1" as "7.999999..." instead of 8.
func TestBasicStatsWithOnlySumFloatingPointErrata(t *testing.T) {
	var sum1 = metric.New("m1",
		map[string]string{},
		map[string]interface{}{
			"a": int64(1),
		},
		time.Now(),
	)
	var sum2 = metric.New("m1",
		map[string]string{},
		map[string]interface{}{
			"a": int64(1),
		},
		time.Now(),
	)
	var sum3 = metric.New("m1",
		map[string]string{},
		map[string]interface{}{
			"a": int64(5),
		},
		time.Now(),
	)
	var sum4 = metric.New("m1",
		map[string]string{},
		map[string]interface{}{
			"a": int64(1),
		},
		time.Now(),
	)
	aggregator := NewBasicStats()
	aggregator.Stats = []string{"sum"}
	aggregator.Log = testutil.Logger{}
	aggregator.initConfiguredStats()
	aggregator.Add(sum1)
	aggregator.Add(sum2)
	aggregator.Add(sum3)
	aggregator.Add(sum4)
	acc := testutil.Accumulator{}
	aggregator.Push(&acc)
	// The sum must be exact, not a float approximation.
	expectedFields := map[string]interface{}{
		"a_sum": float64(8),
	}
	expectedTags := map[string]string{}
	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test only aggregating variance (needs >= 2 samples, so e/f are absent).
func TestBasicStatsWithOnlyVariance(t *testing.T) {
	aggregator := NewBasicStats()
	aggregator.Stats = []string{"s2"}
	aggregator.Log = testutil.Logger{}
	aggregator.initConfiguredStats()
	aggregator.Add(m1)
	aggregator.Add(m2)
	acc := testutil.Accumulator{}
	aggregator.Push(&acc)
	expectedFields := map[string]interface{}{
		"a_s2": float64(0),
		"b_s2": float64(2),
		"c_s2": float64(2),
		"d_s2": float64(8),
		"g_s2": float64(2),
	}
	expectedTags := map[string]string{
		"foo": "bar",
	}
	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}

// Test only aggregating standard deviation (needs >= 2 samples).
func TestBasicStatsWithOnlyStandardDeviation(t *testing.T) {
	aggregator := NewBasicStats()
	aggregator.Stats = []string{"stdev"}
	aggregator.Log = testutil.Logger{}
	aggregator.initConfiguredStats()
	aggregator.Add(m1)
	aggregator.Add(m2)
	acc := testutil.Accumulator{}
	aggregator.Push(&acc)
	expectedFields := map[string]interface{}{
		"a_stdev": float64(0),
		"b_stdev": math.Sqrt(2),
		"c_stdev": math.Sqrt(2),
		"d_stdev": math.Sqrt(8),
		"g_stdev": math.Sqrt(2),
	}
	expectedTags := map[string]string{
		"foo": "bar",
	}
	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test only aggregating minimum and maximum
func TestBasicStatsWithMinAndMax(t *testing.T) {
	aggregator := NewBasicStats()
	aggregator.Stats = []string{"min", "max"}
	aggregator.Log = testutil.Logger{}
	aggregator.initConfiguredStats()
	aggregator.Add(m1)
	aggregator.Add(m2)
	acc := testutil.Accumulator{}
	aggregator.Push(&acc)
	expectedFields := map[string]interface{}{
		"a_max": float64(1), // a
		"a_min": float64(1),
		"b_max": float64(3), // b
		"b_min": float64(1),
		"c_max": float64(4), // c
		"c_min": float64(2),
		"d_max": float64(6), // d
		"d_min": float64(2),
		"e_max": float64(200), // e
		"e_min": float64(200),
		"f_max": float64(200), // f
		"f_min": float64(200),
		"g_max": float64(3), // g
		"g_min": float64(1),
	}
	expectedTags := map[string]string{
		"foo": "bar",
	}
	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}

// Test only aggregating diff (current value minus first value of the period).
func TestBasicStatsWithDiff(t *testing.T) {
	aggregator := NewBasicStats()
	aggregator.Stats = []string{"diff"}
	aggregator.Log = testutil.Logger{}
	aggregator.initConfiguredStats()
	aggregator.Add(m1)
	aggregator.Add(m2)
	acc := testutil.Accumulator{}
	aggregator.Push(&acc)
	expectedFields := map[string]interface{}{
		"a_diff": float64(0),
		"b_diff": float64(2),
		"c_diff": float64(2),
		"d_diff": float64(4),
		"g_diff": float64(-2),
	}
	expectedTags := map[string]string{
		"foo": "bar",
	}
	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test only aggregating rate; m1/m2 are 1ms apart, so rate = diff * 1000.
func TestBasicStatsWithRate(t *testing.T) {
	aggregator := NewBasicStats()
	aggregator.Stats = []string{"rate"}
	aggregator.Log = testutil.Logger{}
	aggregator.initConfiguredStats()
	aggregator.Add(m1)
	aggregator.Add(m2)
	acc := testutil.Accumulator{}
	aggregator.Push(&acc)
	expectedFields := map[string]interface{}{
		"a_rate": float64(0),
		"b_rate": float64(2000),
		"c_rate": float64(2000),
		"d_rate": float64(4000),
		"g_rate": float64(-2000),
	}
	expectedTags := map[string]string{
		"foo": "bar",
	}
	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}

// Test only aggregating non_negative_rate; g's negative rate is suppressed.
func TestBasicStatsWithNonNegativeRate(t *testing.T) {
	aggregator := NewBasicStats()
	aggregator.Stats = []string{"non_negative_rate"}
	aggregator.Log = testutil.Logger{}
	aggregator.initConfiguredStats()
	aggregator.Add(m1)
	aggregator.Add(m2)
	acc := testutil.Accumulator{}
	aggregator.Push(&acc)
	expectedFields := map[string]interface{}{
		"a_non_negative_rate": float64(0),
		"b_non_negative_rate": float64(2000),
		"c_non_negative_rate": float64(2000),
		"d_non_negative_rate": float64(4000),
	}
	expectedTags := map[string]string{
		"foo": "bar",
	}
	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}

// Test only aggregating percent_change relative to the period's first value.
func TestBasicStatsWithPctChange(t *testing.T) {
	aggregator := NewBasicStats()
	aggregator.Stats = []string{"percent_change"}
	aggregator.Log = testutil.Logger{}
	aggregator.initConfiguredStats()
	aggregator.Add(m1)
	aggregator.Add(m2)
	acc := testutil.Accumulator{}
	aggregator.Push(&acc)
	expectedFields := map[string]interface{}{
		"a_percent_change": float64(0),
		"b_percent_change": float64(200),
		"c_percent_change": float64(100),
		"d_percent_change": float64(200),
		"g_percent_change": float64(-66.66666666666666),
	}
	expectedTags := map[string]string{
		"foo": "bar",
	}
	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}

// Test only aggregating interval (nanoseconds between first and last sample).
func TestBasicStatsWithInterval(t *testing.T) {
	aggregator := NewBasicStats()
	aggregator.Stats = []string{"interval"}
	aggregator.Log = testutil.Logger{}
	aggregator.initConfiguredStats()
	aggregator.Add(m1)
	aggregator.Add(m2)
	acc := testutil.Accumulator{}
	aggregator.Push(&acc)
	expectedFields := map[string]interface{}{
		"a_interval": int64(time.Millisecond),
		"b_interval": int64(time.Millisecond),
		"c_interval": int64(time.Millisecond),
		"d_interval": int64(time.Millisecond),
		"g_interval": int64(time.Millisecond),
	}
	expectedTags := map[string]string{
		"foo": "bar",
	}
	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test only aggregating non_negative_diff
func TestBasicStatsWithNonNegativeDiff(t *testing.T) {
	aggregator := NewBasicStats()
	aggregator.Stats = []string{"non_negative_diff"}
	aggregator.Log = testutil.Logger{}
	aggregator.initConfiguredStats()
	aggregator.Add(m1)
	aggregator.Add(m2)
	acc := testutil.Accumulator{}
	aggregator.Push(&acc)
	expectedFields := map[string]interface{}{
		"a_non_negative_diff": float64(0),
		"b_non_negative_diff": float64(2),
		"c_non_negative_diff": float64(2),
		"d_non_negative_diff": float64(4),
	}
	expectedTags := map[string]string{
		"foo": "bar",
	}
	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}

// Test aggregating with all stats
func TestBasicStatsWithAllStats(t *testing.T) {
	acc := testutil.Accumulator{}
	minmax := NewBasicStats()
	minmax.Log = testutil.Logger{}
	minmax.Stats = []string{"count", "min", "max", "mean", "stdev", "s2", "sum", "last", "first"}
	minmax.initConfiguredStats()
	minmax.Add(m1)
	minmax.Add(m2)
	minmax.Push(&acc)
	expectedFields := map[string]interface{}{
		"a_count": float64(2), // a
		"a_max": float64(1),
		"a_min": float64(1),
		"a_mean": float64(1),
		"a_stdev": float64(0),
		"a_s2": float64(0),
		"a_sum": float64(2),
		"a_last": float64(1),
		"a_first": float64(1),
		"b_count": float64(2), // b
		"b_max": float64(3),
		"b_min": float64(1),
		"b_mean": float64(2),
		"b_s2": float64(2),
		"b_sum": float64(4),
		"b_last": float64(3),
		"b_stdev": math.Sqrt(2),
		"b_first": float64(1),
		"c_count": float64(2), // c
		"c_max": float64(4),
		"c_min": float64(2),
		"c_mean": float64(3),
		"c_s2": float64(2),
		"c_stdev": math.Sqrt(2),
		"c_sum": float64(6),
		"c_last": float64(4),
		"c_first": float64(2),
		"d_count": float64(2), // d
		"d_max": float64(6),
		"d_min": float64(2),
		"d_mean": float64(4),
		"d_s2": float64(8),
		"d_stdev": math.Sqrt(8),
		"d_sum": float64(8),
		"d_last": float64(6),
		"d_first": float64(2),
		"e_count": float64(1), // e
		"e_max": float64(200),
		"e_min": float64(200),
		"e_mean": float64(200),
		"e_sum": float64(200),
		"e_last": float64(200),
		"e_first": float64(200),
		"f_count": float64(1), // f
		"f_max": float64(200),
		"f_min": float64(200),
		"f_mean": float64(200),
		"f_sum": float64(200),
		"f_last": float64(200),
		"f_first": float64(200),
		"g_count": float64(2), // g
		"g_max": float64(3),
		"g_min": float64(1),
		"g_mean": float64(2),
		"g_s2": float64(2),
		"g_stdev": math.Sqrt(2),
		"g_sum": float64(4),
		"g_last": float64(1),
		"g_first": float64(3),
	}
	expectedTags := map[string]string{
		"foo": "bar",
	}
	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}

// Test that if an empty array is passed, no points are pushed
func TestBasicStatsWithNoStats(t *testing.T) {
	aggregator := NewBasicStats()
	aggregator.Stats = make([]string, 0)
	aggregator.Log = testutil.Logger{}
	aggregator.initConfiguredStats()
	aggregator.Add(m1)
	aggregator.Add(m2)
	acc := testutil.Accumulator{}
	aggregator.Push(&acc)
	acc.AssertDoesNotContainMeasurement(t, "m1")
}

// Test that if an unknown stat is configured, it doesn't explode
func TestBasicStatsWithUnknownStat(t *testing.T) {
	aggregator := NewBasicStats()
	aggregator.Stats = []string{"crazy"}
	aggregator.Log = testutil.Logger{}
	aggregator.initConfiguredStats()
	aggregator.Add(m1)
	aggregator.Add(m2)
	acc := testutil.Accumulator{}
	aggregator.Push(&acc)
	acc.AssertDoesNotContainMeasurement(t, "m1")
}
// Test that if Stats isn't supplied, then we only do count, min, max, mean,
// stdev, and s2. We purposely exclude sum for backwards compatibility,
// otherwise user's working systems will suddenly (and surprisingly) start
// capturing sum without their input.
func TestBasicStatsWithDefaultStats(t *testing.T) {
	aggregator := NewBasicStats()
	aggregator.Log = testutil.Logger{}
	aggregator.initConfiguredStats()
	aggregator.Add(m1)
	aggregator.Add(m2)
	acc := testutil.Accumulator{}
	aggregator.Push(&acc)
	require.True(t, acc.HasField("m1", "a_count"))
	require.True(t, acc.HasField("m1", "a_min"))
	require.True(t, acc.HasField("m1", "a_max"))
	require.True(t, acc.HasField("m1", "a_mean"))
	require.True(t, acc.HasField("m1", "a_stdev"))
	require.True(t, acc.HasField("m1", "a_s2"))
	require.False(t, acc.HasField("m1", "a_sum"))
}

// Test only aggregating last (most recent value in the period).
func TestBasicStatsWithOnlyLast(t *testing.T) {
	aggregator := NewBasicStats()
	aggregator.Stats = []string{"last"}
	aggregator.Log = testutil.Logger{}
	aggregator.initConfiguredStats()
	aggregator.Add(m1)
	aggregator.Add(m2)
	acc := testutil.Accumulator{}
	aggregator.Push(&acc)
	expectedFields := map[string]interface{}{
		"a_last": float64(1),
		"b_last": float64(3),
		"c_last": float64(4),
		"d_last": float64(6),
		"e_last": float64(200),
		"f_last": float64(200),
		"g_last": float64(1),
	}
	expectedTags := map[string]string{
		"foo": "bar",
	}
	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}

// Test only aggregating first (earliest value in the period).
func TestBasicStatsWithOnlyFirst(t *testing.T) {
	aggregator := NewBasicStats()
	aggregator.Stats = []string{"first"}
	aggregator.Log = testutil.Logger{}
	aggregator.initConfiguredStats()
	aggregator.Add(m1)
	aggregator.Add(m2)
	acc := testutil.Accumulator{}
	aggregator.Push(&acc)
	expectedFields := map[string]interface{}{
		"a_first": float64(1),
		"b_first": float64(1),
		"c_first": float64(2),
		"d_first": float64(2),
		"e_first": float64(200),
		"f_first": float64(200),
		"g_first": float64(3),
	}
	expectedTags := map[string]string{
		"foo": "bar",
	}
	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}

View file

@ -0,0 +1,11 @@
# Keep the aggregate basicstats of each metric passing through.
[[aggregators.basicstats]]
## The period on which to flush & clear the aggregator.
# period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
# drop_original = false
## Configures which basic stats to push as fields
# stats = ["count","min","max","mean","s2","stdev"]