Adding upstream version 1.34.4.
Signed-off-by: Daniel Baumann <daniel@debian.org>
parent e393c3af3f
commit 4978089aab
4963 changed files with 677545 additions and 0 deletions
83  plugins/processors/scale/README.md  Normal file
@@ -0,0 +1,83 @@
# Scale Processor Plugin

The scale processor filters for a set of fields,
and scales the respective values from an input range into
the given output range according to this formula:

```math
\text{result}=(\text{value}-\text{input\_minimum})\cdot\frac{(\text{output\_maximum}-\text{output\_minimum})}
{(\text{input\_maximum}-\text{input\_minimum})} +
\text{output\_minimum}
```

Alternatively, you can apply a factor and offset to the input according to
this formula:

```math
\text{result}=\text{factor} \cdot \text{value} + \text{offset}
```

Input fields are converted to floating point values if possible. Otherwise,
fields that cannot be converted are ignored and keep their original value.

**Please note:** Neither the input nor the output values are clipped to their
respective ranges!

## Global configuration options <!-- @/docs/includes/plugin_config.md -->

In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used to
modify metrics, tags, and fields or create aliases and configure ordering, etc.
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.

[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins

## Configuration

```toml @sample.conf
# Scale values with a predefined range to a different output range.
[[processors.scale]]
  ## It is possible to define multiple different scalings that can be applied
  ## to different sets of fields. Each scaling expects the following
  ## arguments:
  ##   - input_minimum: Minimum expected input value
  ##   - input_maximum: Maximum expected input value
  ##   - output_minimum: Minimum desired output value
  ##   - output_maximum: Maximum desired output value
  ## Alternatively you can specify a scaling with factor and offset
  ##   - factor: factor to scale the input value with
  ##   - offset: additive offset for value after scaling
  ##   - fields: a list of field names (or filters) to apply this scaling to

  ## Example: Scaling with minimum and maximum values
  # [[processors.scale.scaling]]
  #   input_minimum = 0.0
  #   input_maximum = 1.0
  #   output_minimum = 0.0
  #   output_maximum = 100.0
  #   fields = ["temperature1", "temperature2"]

  ## Example: Scaling with factor and offset
  # [[processors.scale.scaling]]
  #   factor = 10.0
  #   offset = -5.0
  #   fields = ["voltage*"]
```

## Example

The example below uses these scaling values:

```toml
[[processors.scale.scaling]]
  input_minimum = 0.0
  input_maximum = 50.0
  output_minimum = 50.0
  output_maximum = 100.0
  fields = ["cpu"]
```

```diff
- temperature cpu=25
+ temperature cpu=75.0
```
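As a quick sanity check, plugging `cpu = 25` from the diff above into the first formula with these scaling values reproduces the emitted `75.0`:

```math
\text{result} = (25 - 0)\cdot\frac{100 - 50}{50 - 0} + 50 = 25 \cdot 1 + 50 = 75
```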
27  plugins/processors/scale/sample.conf  Normal file
@@ -0,0 +1,27 @@
# Scale values with a predefined range to a different output range.
[[processors.scale]]
  ## It is possible to define multiple different scalings that can be applied
  ## to different sets of fields. Each scaling expects the following
  ## arguments:
  ##   - input_minimum: Minimum expected input value
  ##   - input_maximum: Maximum expected input value
  ##   - output_minimum: Minimum desired output value
  ##   - output_maximum: Maximum desired output value
  ## Alternatively you can specify a scaling with factor and offset
  ##   - factor: factor to scale the input value with
  ##   - offset: additive offset for value after scaling
  ##   - fields: a list of field names (or filters) to apply this scaling to

  ## Example: Scaling with minimum and maximum values
  # [[processors.scale.scaling]]
  #   input_minimum = 0.0
  #   input_maximum = 1.0
  #   output_minimum = 0.0
  #   output_maximum = 100.0
  #   fields = ["temperature1", "temperature2"]

  ## Example: Scaling with factor and offset
  # [[processors.scale.scaling]]
  #   factor = 10.0
  #   offset = -5.0
  #   fields = ["voltage*"]
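For the commented factor/offset example above (factor = 10.0, offset = -5.0, fields matching `voltage*`), a hypothetical reading of `voltage = 2.2` would be rewritten as:

```math
\text{result} = 10.0 \cdot 2.2 + (-5.0) = 17.0
```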
147  plugins/processors/scale/scale.go  Normal file
@@ -0,0 +1,147 @@
//go:generate ../../../tools/readme_config_includer/generator
package scale

import (
    _ "embed"
    "errors"
    "fmt"
    "strings"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/filter"
    "github.com/influxdata/telegraf/internal"
    "github.com/influxdata/telegraf/plugins/processors"
)

//go:embed sample.conf
var sampleConfig string

func (*Scale) SampleConfig() string {
    return sampleConfig
}

type Scaling struct {
    InMin  *float64 `toml:"input_minimum"`
    InMax  *float64 `toml:"input_maximum"`
    OutMin *float64 `toml:"output_minimum"`
    OutMax *float64 `toml:"output_maximum"`
    Factor *float64 `toml:"factor"`
    Offset *float64 `toml:"offset"`
    Fields []string `toml:"fields"`

    fieldFilter filter.Filter
    scale       float64
    shiftIn     float64
    shiftOut    float64
}

type Scale struct {
    Scalings []Scaling       `toml:"scaling"`
    Log      telegraf.Logger `toml:"-"`
}

func (s *Scaling) Init() error {
    s.scale, s.shiftOut, s.shiftIn = float64(1.0), float64(0.0), float64(0.0)
    allMinMaxSet := s.OutMax != nil && s.OutMin != nil && s.InMax != nil && s.InMin != nil
    anyMinMaxSet := s.OutMax != nil || s.OutMin != nil || s.InMax != nil || s.InMin != nil
    factorSet := s.Factor != nil || s.Offset != nil
    if anyMinMaxSet && factorSet {
        return fmt.Errorf("cannot use factor/offset and minimum/maximum at the same time for fields %s",
            strings.Join(s.Fields, ","))
    } else if anyMinMaxSet && !allMinMaxSet {
        return fmt.Errorf("all minimum and maximum values need to be set for fields %s", strings.Join(s.Fields, ","))
    } else if !anyMinMaxSet && !factorSet {
        return fmt.Errorf("no scaling defined for fields %s", strings.Join(s.Fields, ","))
    } else if allMinMaxSet {
        if *s.InMax == *s.InMin {
            return fmt.Errorf("input minimum and maximum are equal for fields %s", strings.Join(s.Fields, ","))
        }

        if *s.OutMax == *s.OutMin {
            return fmt.Errorf("output minimum and maximum are equal for fields %s", strings.Join(s.Fields, ","))
        }

        s.scale = (*s.OutMax - *s.OutMin) / (*s.InMax - *s.InMin)
        s.shiftOut = *s.OutMin
        s.shiftIn = *s.InMin
    } else {
        if s.Factor != nil {
            s.scale = *s.Factor
        }
        if s.Offset != nil {
            s.shiftOut = *s.Offset
        }
    }

    scalingFilter, err := filter.Compile(s.Fields)
    if err != nil {
        return fmt.Errorf("could not compile fields filter: %w", err)
    }
    s.fieldFilter = scalingFilter

    return nil
}

// scale a float according to the input and output range
func (s *Scaling) process(value float64) float64 {
    return s.scale*(value-s.shiftIn) + s.shiftOut
}

func (s *Scale) Init() error {
    if s.Scalings == nil {
        return errors.New("no valid scaling defined")
    }

    allFields := make(map[string]bool)
    for i := range s.Scalings {
        for _, field := range s.Scalings[i].Fields {
            // only generate a warning for the first duplicate field filter
            if warn, ok := allFields[field]; ok && warn {
                s.Log.Warnf("Filter field %q used twice in scalings", field)
                allFields[field] = false
            } else {
                allFields[field] = true
            }
        }

        if err := s.Scalings[i].Init(); err != nil {
            return fmt.Errorf("scaling %d: %w", i+1, err)
        }
    }
    return nil
}

// handle the scaling process
func (s *Scale) scaleValues(metric telegraf.Metric) {
    fields := metric.FieldList()

    for _, scaling := range s.Scalings {
        for _, field := range fields {
            if !scaling.fieldFilter.Match(field.Key) {
                continue
            }

            v, err := internal.ToFloat64(field.Value)
            if err != nil {
                s.Log.Errorf("Error converting %q to float: %v", field.Key, err)
                continue
            }

            // scale the field values using the defined scaler
            field.Value = scaling.process(v)
        }
    }
}

func (s *Scale) Apply(in ...telegraf.Metric) []telegraf.Metric {
    for _, metric := range in {
        s.scaleValues(metric)
    }
    return in
}

func init() {
    processors.Add("scale", func() telegraf.Processor {
        return &Scale{}
    })
}
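As a side note on the code above: `Scaling.Init` folds the min/max configuration into the same three parameters (`scale`, `shiftIn`, `shiftOut`) that the factor/offset form uses, so `process` evaluates a single linear expression either way. The standalone sketch below (illustrative only, no Telegraf imports, names chosen here) mirrors that precomputation for the README's example range to show that the two forms coincide.

```go
package main

import "fmt"

func main() {
    // Min/max form from the README example: 0..50 mapped onto 50..100.
    inMin, inMax, outMin, outMax := 0.0, 50.0, 50.0, 100.0

    // Precomputation as done in Scaling.Init.
    scale := (outMax - outMin) / (inMax - inMin) // 1.0
    shiftIn := inMin                             // 0.0
    shiftOut := outMin                           // 50.0

    // The single linear expression evaluated by process().
    process := func(v float64) float64 { return scale*(v-shiftIn) + shiftOut }

    // The same mapping expressed in factor/offset form (factor=1, offset=50).
    factor, offset := 1.0, 50.0
    viaFactor := func(v float64) float64 { return factor*v + offset }

    for _, v := range []float64{0, 25, 50, 75} {
        fmt.Printf("cpu=%v -> %v (min/max) vs %v (factor/offset)\n", v, process(v), viaFactor(v))
    }
}
```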
552  plugins/processors/scale/scale_test.go  Normal file
@@ -0,0 +1,552 @@
package scale

import (
    "sync"
    "testing"
    "time"

    "github.com/google/go-cmp/cmp/cmpopts"
    "github.com/stretchr/testify/require"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/metric"
    "github.com/influxdata/telegraf/testutil"
)

type scalingValuesMinMax struct {
    InMin  float64
    InMax  float64
    OutMin float64
    OutMax float64
    Fields []string
}

type scalingValuesFactor struct {
    Factor float64
    Offset float64
    Fields []string
}

func TestMinMax(t *testing.T) {
    tests := []struct {
        name     string
        scale    []scalingValuesMinMax
        inputs   []telegraf.Metric
        expected []telegraf.Metric
    }{
        {
            name: "Field Scaling",
            scale: []scalingValuesMinMax{
                {
                    InMin:  -1,
                    InMax:  1,
                    OutMin: 0,
                    OutMax: 100,
                    Fields: []string{"test1", "test2"},
                },
                {
                    InMin:  -5,
                    InMax:  0,
                    OutMin: 1,
                    OutMax: 10,
                    Fields: []string{"test3", "test4"},
                },
            },
            inputs: []telegraf.Metric{
                testutil.MustMetric("Name1", map[string]string{},
                    map[string]interface{}{
                        "test1": int64(0),
                        "test2": uint64(1),
                    }, time.Unix(0, 0)),
                testutil.MustMetric("Name2", map[string]string{},
                    map[string]interface{}{
                        "test1": "0.5",
                        "test2": float32(-0.5),
                    }, time.Unix(0, 0)),
                testutil.MustMetric("Name3", map[string]string{},
                    map[string]interface{}{
                        "test3": int64(-3),
                        "test4": uint64(0),
                    }, time.Unix(0, 0)),
                testutil.MustMetric("Name4", map[string]string{},
                    map[string]interface{}{
                        "test3": int64(-5),
                        "test4": float32(-0.5),
                    }, time.Unix(0, 0)),
            },
            expected: []telegraf.Metric{
                testutil.MustMetric("Name1", map[string]string{},
                    map[string]interface{}{
                        "test1": float64(50),
                        "test2": float64(100),
                    }, time.Unix(0, 0)),
                testutil.MustMetric("Name2", map[string]string{},
                    map[string]interface{}{
                        "test1": float64(75),
                        "test2": float32(25),
                    }, time.Unix(0, 0)),
                testutil.MustMetric("Name3", map[string]string{},
                    map[string]interface{}{
                        "test3": float64(4.6),
                        "test4": float64(10),
                    }, time.Unix(0, 0)),
                testutil.MustMetric("Name4", map[string]string{},
                    map[string]interface{}{
                        "test3": float64(1),
                        "test4": float64(9.1),
                    }, time.Unix(0, 0)),
            },
        },
        {
            name: "Ignored Fields",
            scale: []scalingValuesMinMax{
                {
                    InMin:  -1,
                    InMax:  1,
                    OutMin: 0,
                    OutMax: 100,
                    Fields: []string{"test1", "test2"},
                },
            },
            inputs: []telegraf.Metric{
                testutil.MustMetric("Name1", map[string]string{},
                    map[string]interface{}{
                        "test1": int64(0),
                        "test2": uint64(1),
                        "test3": int64(1),
                    }, time.Unix(0, 0)),
            },
            expected: []telegraf.Metric{
                testutil.MustMetric("Name1", map[string]string{},
                    map[string]interface{}{
                        "test1": float64(50),
                        "test2": float64(100),
                        "test3": int64(1),
                    }, time.Unix(0, 0)),
            },
        },
        {
            name: "Out of range tests",
            scale: []scalingValuesMinMax{
                {
                    InMin:  -1,
                    InMax:  1,
                    OutMin: 0,
                    OutMax: 100,
                    Fields: []string{"test1", "test2"},
                },
            },
            inputs: []telegraf.Metric{
                testutil.MustMetric("Name1", map[string]string{},
                    map[string]interface{}{
                        "test1": int64(-2),
                        "test2": uint64(2),
                    }, time.Unix(0, 0)),
            },
            expected: []telegraf.Metric{
                testutil.MustMetric("Name1", map[string]string{},
                    map[string]interface{}{
                        "test1": float64(-50),
                        "test2": float64(150),
                    }, time.Unix(0, 0)),
            },
        },
        {
            name: "Missing field Fields",
            scale: []scalingValuesMinMax{
                {
                    InMin:  -1,
                    InMax:  1,
                    OutMin: 0,
                    OutMax: 100,
                    Fields: []string{"test1", "test2"},
                },
            },
            inputs: []telegraf.Metric{
                testutil.MustMetric("Name1", map[string]string{},
                    map[string]interface{}{
                        "test1": int64(0),
                    }, time.Unix(0, 0)),
            },
            expected: []telegraf.Metric{
                testutil.MustMetric("Name1", map[string]string{},
                    map[string]interface{}{
                        "test1": float64(50),
                    }, time.Unix(0, 0)),
            },
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            plugin := &Scale{
                Scalings: make([]Scaling, 0, len(tt.scale)),
                Log:      testutil.Logger{},
            }
            for i := range tt.scale {
                plugin.Scalings = append(plugin.Scalings, Scaling{
                    InMin:  &tt.scale[i].InMin,
                    InMax:  &tt.scale[i].InMax,
                    OutMin: &tt.scale[i].OutMin,
                    OutMax: &tt.scale[i].OutMax,
                    Fields: tt.scale[i].Fields,
                })
            }
            require.NoError(t, plugin.Init())
            actual := plugin.Apply(tt.inputs...)

            testutil.RequireMetricsEqual(t, tt.expected, actual)
        })
    }
}

func TestFactor(t *testing.T) {
    tests := []struct {
        name     string
        scale    []scalingValuesFactor
        inputs   []telegraf.Metric
        expected []telegraf.Metric
    }{
        {
            name: "Field Scaling",
            scale: []scalingValuesFactor{
                {
                    Factor: 50.0,
                    Offset: 50.0,
                    Fields: []string{"test1", "test2"},
                },
                {
                    Factor: 1.6,
                    Offset: 9.0,
                    Fields: []string{"test3", "test4"},
                },
            },
            inputs: []telegraf.Metric{
                testutil.MustMetric("Name1", map[string]string{},
                    map[string]interface{}{
                        "test1": int64(0),
                        "test2": uint64(1),
                    }, time.Unix(0, 0)),
                testutil.MustMetric("Name2", map[string]string{},
                    map[string]interface{}{
                        "test1": "0.5",
                        "test2": float32(-0.5),
                    }, time.Unix(0, 0)),
                testutil.MustMetric("Name3", map[string]string{},
                    map[string]interface{}{
                        "test3": int64(-3),
                        "test4": uint64(0),
                    }, time.Unix(0, 0)),
                testutil.MustMetric("Name4", map[string]string{},
                    map[string]interface{}{
                        "test3": int64(-5),
                        "test4": float32(-0.5),
                    }, time.Unix(0, 0)),
            },
            expected: []telegraf.Metric{
                testutil.MustMetric("Name1", map[string]string{},
                    map[string]interface{}{
                        "test1": float64(50),
                        "test2": float64(100),
                    }, time.Unix(0, 0)),
                testutil.MustMetric("Name2", map[string]string{},
                    map[string]interface{}{
                        "test1": float64(75),
                        "test2": float32(25),
                    }, time.Unix(0, 0)),
                testutil.MustMetric("Name3", map[string]string{},
                    map[string]interface{}{
                        "test3": float64(4.2),
                        "test4": float64(9),
                    }, time.Unix(0, 0)),
                testutil.MustMetric("Name4", map[string]string{},
                    map[string]interface{}{
                        "test3": float64(1),
                        "test4": float64(8.2),
                    }, time.Unix(0, 0)),
            },
        },
        {
            name: "Ignored Fields",
            scale: []scalingValuesFactor{
                {
                    Factor: 50.0,
                    Offset: 50.0,
                    Fields: []string{"test1", "test2"},
                },
            },
            inputs: []telegraf.Metric{
                testutil.MustMetric("Name1", map[string]string{},
                    map[string]interface{}{
                        "test1": int64(0),
                        "test2": uint64(1),
                        "test3": int64(1),
                    }, time.Unix(0, 0)),
            },
            expected: []telegraf.Metric{
                testutil.MustMetric("Name1", map[string]string{},
                    map[string]interface{}{
                        "test1": float64(50),
                        "test2": float64(100),
                        "test3": int64(1),
                    }, time.Unix(0, 0)),
            },
        },
        {
            name: "Missing field Fields",
            scale: []scalingValuesFactor{
                {
                    Factor: 50.0,
                    Offset: 50.0,
                    Fields: []string{"test1", "test2"},
                },
            },
            inputs: []telegraf.Metric{
                testutil.MustMetric("Name1", map[string]string{},
                    map[string]interface{}{
                        "test1": int64(0),
                    }, time.Unix(0, 0)),
            },
            expected: []telegraf.Metric{
                testutil.MustMetric("Name1", map[string]string{},
                    map[string]interface{}{
                        "test1": float64(50),
                    }, time.Unix(0, 0)),
            },
        },
        {
            name: "No Offset",
            scale: []scalingValuesFactor{
                {
                    Factor: 50.0,
                    Fields: []string{"test1"},
                },
            },
            inputs: []telegraf.Metric{
                testutil.MustMetric("Name1", map[string]string{},
                    map[string]interface{}{
                        "test1": int64(1),
                    }, time.Unix(0, 0)),
            },
            expected: []telegraf.Metric{
                testutil.MustMetric("Name1", map[string]string{},
                    map[string]interface{}{
                        "test1": float64(50),
                    }, time.Unix(0, 0)),
            },
        },
        {
            name: "No Factor",
            scale: []scalingValuesFactor{
                {
                    Offset: 50.0,
                    Fields: []string{"test1"},
                },
            },
            inputs: []telegraf.Metric{
                testutil.MustMetric("Name1", map[string]string{},
                    map[string]interface{}{
                        "test1": int64(1),
                    }, time.Unix(0, 0)),
            },
            expected: []telegraf.Metric{
                testutil.MustMetric("Name1", map[string]string{},
                    map[string]interface{}{
                        "test1": float64(51),
                    }, time.Unix(0, 0)),
            },
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            plugin := &Scale{
                Scalings: make([]Scaling, 0, len(tt.scale)),
                Log:      testutil.Logger{},
            }
            for i := range tt.scale {
                s := Scaling{
                    Fields: tt.scale[i].Fields,
                }
                if tt.scale[i].Factor != 0.0 {
                    s.Factor = &tt.scale[i].Factor
                }
                if tt.scale[i].Offset != 0.0 {
                    s.Offset = &tt.scale[i].Offset
                }
                plugin.Scalings = append(plugin.Scalings, s)
            }
            require.NoError(t, plugin.Init())
            actual := plugin.Apply(tt.inputs...)

            testutil.RequireMetricsEqual(t, tt.expected, actual, cmpopts.EquateApprox(0, 1e-6))
        })
    }
}

func TestErrorCasesMinMax(t *testing.T) {
    a0, a1, a100 := float64(0.0), float64(1.0), float64(100.0)
    tests := []struct {
        name             string
        scaling          []Scaling
        fields           []string
        expectedErrorMsg string
    }{
        {
            name: "Same input range values",
            scaling: []Scaling{
                {
                    InMin:  &a1,
                    InMax:  &a1,
                    OutMin: &a0,
                    OutMax: &a100,
                    Fields: []string{"test"},
                },
            },
            fields:           []string{"test"},
            expectedErrorMsg: "input minimum and maximum are equal for fields test",
        },
        {
            name: "Same output range values",
            scaling: []Scaling{
                {
                    InMin:  &a0,
                    InMax:  &a1,
                    OutMin: &a100,
                    OutMax: &a100,
                    Fields: []string{"test"},
                },
            },
            fields:           []string{"test"},
            expectedErrorMsg: "output minimum and maximum are equal",
        },
        {
            name: "Nothing set",
            scaling: []Scaling{
                {
                    Fields: []string{"test"},
                },
            },
            fields:           []string{"test"},
            expectedErrorMsg: "no scaling defined",
        },
        {
            name: "Partial minimum and maximum",
            scaling: []Scaling{
                {
                    InMin:  &a0,
                    Fields: []string{"test"},
                },
            },
            fields:           []string{"test"},
            expectedErrorMsg: "all minimum and maximum values need to be set",
        },
        {
            name: "Mixed minimum, maximum and factor",
            scaling: []Scaling{
                {
                    InMin:  &a0,
                    InMax:  &a1,
                    OutMin: &a100,
                    OutMax: &a100,
                    Factor: &a1,
                    Fields: []string{"test"},
                },
            },
            fields:           []string{"test"},
            expectedErrorMsg: "cannot use factor/offset and minimum/maximum at the same time",
        },
        {
            name:             "No scaling",
            expectedErrorMsg: "no valid scaling defined",
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            plugin := &Scale{
                Scalings: tt.scaling,
                Log:      testutil.Logger{},
            }
            err := plugin.Init()
            require.ErrorContains(t, err, tt.expectedErrorMsg)
        })
    }
}

func TestTracking(t *testing.T) {
    inputRaw := []telegraf.Metric{
        metric.New("foo", map[string]string{}, map[string]interface{}{"value": 42}, time.Unix(0, 0)),
        metric.New("bar", map[string]string{}, map[string]interface{}{"value": 99}, time.Unix(0, 0)),
        metric.New("baz", map[string]string{}, map[string]interface{}{"value": 1}, time.Unix(0, 0)),
    }

    var mu sync.Mutex
    delivered := make([]telegraf.DeliveryInfo, 0, len(inputRaw))
    notify := func(di telegraf.DeliveryInfo) {
        mu.Lock()
        defer mu.Unlock()
        delivered = append(delivered, di)
    }

    input := make([]telegraf.Metric, 0, len(inputRaw))
    for _, m := range inputRaw {
        tm, _ := metric.WithTracking(m, notify)
        input = append(input, tm)
    }

    expected := []telegraf.Metric{
        metric.New(
            "foo",
            map[string]string{},
            map[string]interface{}{"value": float64(92)},
            time.Unix(0, 0),
        ),
        metric.New(
            "bar",
            map[string]string{},
            map[string]interface{}{"value": float64(149)},
            time.Unix(0, 0),
        ),
        metric.New(
            "baz",
            map[string]string{},
            map[string]interface{}{"value": float64(51)},
            time.Unix(0, 0),
        ),
    }

    inMin := float64(0)
    inMax := float64(50)
    outMin := float64(50)
    outMax := float64(100)

    plugin := &Scale{
        Scalings: []Scaling{
            {
                InMin:  &inMin,
                InMax:  &inMax,
                OutMin: &outMin,
                OutMax: &outMax,
                Fields: []string{"value"},
            },
        },
    }
    require.NoError(t, plugin.Init())

    // Process expected metrics and compare with resulting metrics
    actual := plugin.Apply(input...)
    testutil.RequireMetricsEqual(t, expected, actual)

    // Simulate output acknowledging delivery
    for _, m := range actual {
        m.Accept()
    }

    // Check delivery
    require.Eventuallyf(t, func() bool {
        mu.Lock()
        defer mu.Unlock()
        return len(input) == len(delivered)
    }, time.Second, 100*time.Millisecond, "%d delivered but %d expected", len(delivered), len(expected))
}
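One behaviour of `Scale.Init` shown in scale.go but not exercised by a dedicated test above is the duplicate field-filter warning. The sketch below is a hypothetical companion test (same package, test name invented here) illustrating that initialization still succeeds when two scalings target the same field name; the duplicate only triggers a warning.

```go
package scale

import (
    "testing"

    "github.com/stretchr/testify/require"

    "github.com/influxdata/telegraf/testutil"
)

// Hypothetical companion test: two scalings matching the same field should
// still initialize; Scale.Init only logs a duplicate-filter warning.
func TestDuplicateFieldFilterWarns(t *testing.T) {
    factor := 2.0
    offset := 1.0
    plugin := &Scale{
        Scalings: []Scaling{
            {Factor: &factor, Fields: []string{"value"}},
            {Offset: &offset, Fields: []string{"value"}},
        },
        Log: testutil.Logger{},
    }
    // Init succeeds; the repeated "value" filter is only warned about,
    // and both scalings are applied in order during Apply.
    require.NoError(t, plugin.Init())
}
```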