
Adding upstream version 1.34.4.

Signed-off-by: Daniel Baumann <daniel@debian.org>
Daniel Baumann 2025-05-24 07:26:29 +02:00
parent e393c3af3f
commit 4978089aab
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
4963 changed files with 677545 additions and 0 deletions

View file

@ -0,0 +1 @@
package all

View file

@ -0,0 +1,5 @@
//go:build !custom || processors || processors.aws_ec2
package all
import _ "github.com/influxdata/telegraf/plugins/processors/aws_ec2" // register plugin

View file

@ -0,0 +1,5 @@
//go:build !custom || processors || processors.batch
package all
import _ "github.com/influxdata/telegraf/plugins/processors/batch" // register plugin

View file

@ -0,0 +1,5 @@
//go:build !custom || processors || processors.clone
package all
import _ "github.com/influxdata/telegraf/plugins/processors/clone" // register plugin

View file

@ -0,0 +1,5 @@
//go:build !custom || processors || processors.converter
package all
import _ "github.com/influxdata/telegraf/plugins/processors/converter" // register plugin

View file

@ -0,0 +1,5 @@
//go:build !custom || processors || processors.date
package all
import _ "github.com/influxdata/telegraf/plugins/processors/date" // register plugin

View file

@ -0,0 +1,5 @@
//go:build !custom || processors || processors.dedup
package all
import _ "github.com/influxdata/telegraf/plugins/processors/dedup" // register plugin

View file

@ -0,0 +1,5 @@
//go:build !custom || processors || processors.defaults
package all
import _ "github.com/influxdata/telegraf/plugins/processors/defaults" // register plugin

View file

@ -0,0 +1,5 @@
//go:build !custom || processors || processors.enum
package all
import _ "github.com/influxdata/telegraf/plugins/processors/enum" // register plugin

View file

@ -0,0 +1,5 @@
//go:build !custom || processors || processors.execd
package all
import _ "github.com/influxdata/telegraf/plugins/processors/execd" // register plugin

View file

@ -0,0 +1,5 @@
//go:build !custom || processors || processors.filepath
package all
import _ "github.com/influxdata/telegraf/plugins/processors/filepath" // register plugin

View file

@ -0,0 +1,5 @@
//go:build !custom || processors || processors.filter
package all
import _ "github.com/influxdata/telegraf/plugins/processors/filter" // register plugin

View file

@ -0,0 +1,5 @@
//go:build !custom || processors || processors.ifname
package all
import _ "github.com/influxdata/telegraf/plugins/processors/ifname" // register plugin

View file

@ -0,0 +1,5 @@
//go:build !custom || processors || processors.lookup
package all
import _ "github.com/influxdata/telegraf/plugins/processors/lookup" // register plugin

View file

@ -0,0 +1,5 @@
//go:build !custom || processors || processors.noise
package all
import _ "github.com/influxdata/telegraf/plugins/processors/noise" // register plugin

View file

@ -0,0 +1,5 @@
//go:build !custom || processors || processors.override
package all
import _ "github.com/influxdata/telegraf/plugins/processors/override" // register plugin

View file

@ -0,0 +1,5 @@
//go:build !custom || processors || processors.parser
package all
import _ "github.com/influxdata/telegraf/plugins/processors/parser" // register plugin

View file

@ -0,0 +1,5 @@
//go:build !custom || processors || processors.pivot
package all
import _ "github.com/influxdata/telegraf/plugins/processors/pivot" // register plugin

View file

@ -0,0 +1,5 @@
//go:build !custom || processors || processors.port_name
package all
import _ "github.com/influxdata/telegraf/plugins/processors/port_name" // register plugin

View file

@ -0,0 +1,5 @@
//go:build !custom || processors || processors.printer
package all
import _ "github.com/influxdata/telegraf/plugins/processors/printer" // register plugin

View file

@ -0,0 +1,5 @@
//go:build !custom || processors || processors.timestamp
package all
import _ "github.com/influxdata/telegraf/plugins/processors/timestamp" // register plugin

View file

@ -0,0 +1,5 @@
//go:build !custom || processors || processors.regex
package all
import _ "github.com/influxdata/telegraf/plugins/processors/regex" // register plugin

View file

@ -0,0 +1,5 @@
//go:build !custom || processors || processors.rename
package all
import _ "github.com/influxdata/telegraf/plugins/processors/rename" // register plugin

View file

@ -0,0 +1,5 @@
//go:build !custom || processors || processors.reverse_dns
package all
import _ "github.com/influxdata/telegraf/plugins/processors/reverse_dns" // register plugin

View file

@ -0,0 +1,5 @@
//go:build !custom || processors || processors.s2geo
package all
import _ "github.com/influxdata/telegraf/plugins/processors/s2geo" // register plugin

View file

@ -0,0 +1,5 @@
//go:build !custom || processors || processors.scale
package all
import _ "github.com/influxdata/telegraf/plugins/processors/scale" // register plugin

View file

@ -0,0 +1,5 @@
//go:build !custom || processors || processors.snmp_lookup
package all
import _ "github.com/influxdata/telegraf/plugins/processors/snmp_lookup" // register plugin

View file

@ -0,0 +1,5 @@
//go:build !custom || processors || processors.split
package all
import _ "github.com/influxdata/telegraf/plugins/processors/split" // register plugin

View file

@ -0,0 +1,5 @@
//go:build !custom || processors || processors.starlark
package all
import _ "github.com/influxdata/telegraf/plugins/processors/starlark" // register plugin

View file

@ -0,0 +1,5 @@
//go:build !custom || processors || processors.strings
package all
import _ "github.com/influxdata/telegraf/plugins/processors/strings" // register plugin

View file

@ -0,0 +1,5 @@
//go:build !custom || processors || processors.tag_limit
package all
import _ "github.com/influxdata/telegraf/plugins/processors/tag_limit" // register plugin

View file

@ -0,0 +1,5 @@
//go:build !custom || processors || processors.template
package all
import _ "github.com/influxdata/telegraf/plugins/processors/template" // register plugin

View file

@ -0,0 +1,5 @@
//go:build !custom || processors || processors.topk
package all
import _ "github.com/influxdata/telegraf/plugins/processors/topk" // register plugin

View file

@ -0,0 +1,5 @@
//go:build !custom || processors || processors.unpivot
package all
import _ "github.com/influxdata/telegraf/plugins/processors/unpivot" // register plugin

View file

@ -0,0 +1,126 @@
# AWS EC2 Metadata Processor Plugin
The AWS EC2 Metadata processor plugin appends metadata gathered from [AWS IMDS][]
to metrics associated with EC2 instances.
[AWS IMDS]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used to
modify metrics, tags, and fields, or create aliases and configure ordering, etc.
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
## Configuration
```toml @sample.conf
# Attach AWS EC2 metadata to metrics
[[processors.aws_ec2]]
## Instance identity document tags to attach to metrics.
## For more information see:
## https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-identity-documents.html
##
## Available tags:
## * accountId
## * architecture
## * availabilityZone
## * billingProducts
## * imageId
## * instanceId
## * instanceType
## * kernelId
## * pendingTime
## * privateIp
## * ramdiskId
## * region
## * version
# imds_tags = []
## EC2 instance tags retrieved with DescribeTags action.
## If a tag is empty upon retrieval, it is omitted when tagging metrics.
## Note that for this to work, the role attached to the EC2 instance or the
## AWS credentials available from the environment must have a policy attached
## that allows ec2:DescribeTags.
##
## For more information see:
## https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeTags.html
# ec2_tags = []
## Paths to instance metadata information to attach to the metrics.
## Specify the full path without the base-path, e.g. `tags/instance/Name`.
##
## For more information see:
## https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html
# metadata_paths = []
## Convert metadata tag-names to canonical names representing the full path,
## with slashes ('/') replaced by underscores. By default, only the last
## path element is used to name the tag.
# canonical_metadata_tags = false
## Timeout for HTTP requests made against the AWS EC2 metadata endpoint.
# timeout = "10s"
## ordered controls whether the metrics need to stay in the order this
## plugin received them in. If false, metrics whose lookups hit cached
## results move through immediately instead of waiting on slower lookups,
## changing the order. If you depend on the order of metrics staying the
## same, set this to true. Keeping the metrics ordered may be slightly
## slower.
# ordered = false
## max_parallel_calls is the maximum number of AWS API calls to be in flight
## at the same time.
## It's probably best to keep this number fairly low.
# max_parallel_calls = 10
## cache_ttl determines how long each cached item will remain in the cache
## before it is removed and needs to be re-queried from the AWS API. By
## default, no items are cached.
# cache_ttl = "0s"
## tag_cache_size determines how many of the values which are found in imds_tags
## or ec2_tags will be kept in memory for faster lookup on successive processing
## of metrics. You may want to adjust this if you have excessively large numbers
## of tags on your EC2 instances, and you are using the ec2_tags field. This
## typically does not need to be changed when using the imds_tags field.
# tag_cache_size = 1000
## log_cache_stats will emit a log line periodically to stdout with details of
## cache entries, hits, misses, and evacuations since the last time stats were
## emitted. This can be helpful in determining whether caching is being effective
## in your environment. Stats are emitted every 30 seconds. By default, this
## setting is disabled.
# log_cache_stats = false
```
## Example
Append `accountId` and `instanceId` tags to metrics:
```toml
[[processors.aws_ec2]]
imds_tags = ["accountId", "instanceId"]
```
```diff
- cpu,hostname=localhost time_idle=42
+ cpu,hostname=localhost,accountId=123456789,instanceId=i-123456789123 time_idle=42
```
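Attach instance metadata by path; a minimal sketch assuming the instance-tags metadata option is enabled on the instance so that `tags/instance/Name` resolves (the value `my-instance` is illustrative):
```toml
[[processors.aws_ec2]]
metadata_paths = ["tags/instance/Name"]
```
```diff
- cpu,hostname=localhost time_idle=42
+ cpu,hostname=localhost,Name=my-instance time_idle=42
```
With `canonical_metadata_tags = true` the tag key would be the full path, i.e. `Name` becomes `tags_instance_Name`.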
## Notes
A single cache is shared by IMDS and EC2 tag values because telegraf's
`AddTag` function does not distinguish between tag sources.
A user can specify a list of both EC2 tags and IMDS tags, and the items in
these lists can, technically, be the same. In that case the EC2 tag's value
will override the IMDS tag's value.
Though this is undesirable, it is unavoidable because the `AddTag` function
does not support this case.
Avoid requesting the same key as both an IMDS tag and an EC2 tag: the EC2
tag will always "win" because this plugin processes EC2 tags *after* IMDS
tags.

View file

@ -0,0 +1,401 @@
//go:generate ../../../tools/readme_config_includer/generator
package aws_ec2
import (
"context"
_ "embed"
"errors"
"fmt"
"io"
"slices"
"strings"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
awsconfig "github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
"github.com/aws/aws-sdk-go-v2/service/ec2"
"github.com/aws/aws-sdk-go-v2/service/ec2/types"
"github.com/aws/smithy-go"
"github.com/coocood/freecache"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/plugins/common/parallel"
"github.com/influxdata/telegraf/plugins/processors"
)
//go:embed sample.conf
var sampleConfig string
type AwsEc2Processor struct {
ImdsTags []string `toml:"imds_tags"`
EC2Tags []string `toml:"ec2_tags"`
MetadataPaths []string `toml:"metadata_paths"`
CanonicalMetadataTags bool `toml:"canonical_metadata_tags"`
Timeout config.Duration `toml:"timeout"`
CacheTTL config.Duration `toml:"cache_ttl"`
Ordered bool `toml:"ordered"`
MaxParallelCalls int `toml:"max_parallel_calls"`
TagCacheSize int `toml:"tag_cache_size"`
LogCacheStats bool `toml:"log_cache_stats"`
Log telegraf.Logger `toml:"-"`
tagCache *freecache.Cache
imdsClient *imds.Client
ec2Client *ec2.Client
parallel parallel.Parallel
instanceID string
cancelCleanupWorker context.CancelFunc
}
const (
DefaultMaxOrderedQueueSize = 10_000
DefaultMaxParallelCalls = 10
DefaultTimeout = 10 * time.Second
DefaultCacheTTL = 0 * time.Hour
DefaultCacheSize = 1000
DefaultLogCacheStats = false
)
var allowedImdsTags = []string{
"accountId",
"architecture",
"availabilityZone",
"billingProducts",
"imageId",
"instanceId",
"instanceType",
"kernelId",
"pendingTime",
"privateIp",
"ramdiskId",
"region",
"version",
}
func (*AwsEc2Processor) SampleConfig() string {
return sampleConfig
}
func (r *AwsEc2Processor) Add(metric telegraf.Metric, _ telegraf.Accumulator) error {
r.parallel.Enqueue(metric)
return nil
}
func (r *AwsEc2Processor) Init() error {
r.Log.Debug("Initializing AWS EC2 Processor")
if len(r.ImdsTags) == 0 && len(r.MetadataPaths) == 0 && len(r.EC2Tags) == 0 {
return errors.New("no tags specified in configuration")
}
for _, tag := range r.ImdsTags {
if tag == "" || !slices.Contains(allowedImdsTags, tag) {
return fmt.Errorf("invalid imds tag %q", tag)
}
}
return nil
}
func (r *AwsEc2Processor) Start(acc telegraf.Accumulator) error {
r.tagCache = freecache.NewCache(r.TagCacheSize)
if r.LogCacheStats {
ctx, cancel := context.WithCancel(context.Background())
r.cancelCleanupWorker = cancel
go r.logCacheStatistics(ctx)
}
r.Log.Debugf("cache: size=%d\n", r.TagCacheSize)
if r.CacheTTL > 0 {
r.Log.Debugf("cache timeout: seconds=%d\n", int(time.Duration(r.CacheTTL).Seconds()))
}
ctx := context.Background()
cfg, err := awsconfig.LoadDefaultConfig(ctx)
if err != nil {
return fmt.Errorf("failed loading default AWS config: %w", err)
}
r.imdsClient = imds.NewFromConfig(cfg)
iido, err := r.imdsClient.GetInstanceIdentityDocument(
ctx,
&imds.GetInstanceIdentityDocumentInput{},
)
if err != nil {
return fmt.Errorf("failed getting instance identity document: %w", err)
}
r.instanceID = iido.InstanceID
if len(r.EC2Tags) > 0 {
// Add region to AWS config when creating EC2 service client since it's required.
cfg.Region = iido.Region
r.ec2Client = ec2.NewFromConfig(cfg)
// Check if instance is allowed to call DescribeTags.
_, err = r.ec2Client.DescribeTags(ctx, &ec2.DescribeTagsInput{
DryRun: aws.Bool(true),
})
var ae smithy.APIError
if errors.As(err, &ae) {
if ae.ErrorCode() != "DryRunOperation" {
return fmt.Errorf("instance doesn't have permissions to call DescribeTags: %w", err)
}
} else if err != nil {
return fmt.Errorf("error calling DescribeTags: %w", err)
}
}
if r.Ordered {
r.parallel = parallel.NewOrdered(acc, r.asyncAdd, DefaultMaxOrderedQueueSize, r.MaxParallelCalls)
} else {
r.parallel = parallel.NewUnordered(acc, r.asyncAdd, r.MaxParallelCalls)
}
return nil
}
func (r *AwsEc2Processor) Stop() {
if r.parallel != nil {
r.parallel.Stop()
}
if r.cancelCleanupWorker != nil {
r.cancelCleanupWorker()
r.cancelCleanupWorker = nil
}
}
func (r *AwsEc2Processor) logCacheStatistics(ctx context.Context) {
if r.tagCache == nil {
return
}
ticker := time.NewTicker(30 * time.Second)
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
r.Log.Debugf("cache: size=%d hit=%d miss=%d full=%d\n",
r.tagCache.EntryCount(),
r.tagCache.HitCount(),
r.tagCache.MissCount(),
r.tagCache.EvacuateCount(),
)
r.tagCache.ResetStatistics()
}
}
}
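// lookupIMDSTags attaches the requested instance-identity-document tags to the
// metric, preferring cached values and querying IMDS only on cache misses.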
func (r *AwsEc2Processor) lookupIMDSTags(metric telegraf.Metric) telegraf.Metric {
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(r.Timeout))
defer cancel()
var tagsNotFound []string
for _, tag := range r.ImdsTags {
val, err := r.tagCache.Get([]byte(tag))
if err != nil {
tagsNotFound = append(tagsNotFound, tag)
} else {
metric.AddTag(tag, string(val))
}
}
if len(tagsNotFound) == 0 {
return metric
}
doc, err := r.imdsClient.GetInstanceIdentityDocument(ctx, &imds.GetInstanceIdentityDocumentInput{})
if err != nil {
r.Log.Errorf("Error when calling GetInstanceIdentityDocument: %v", err)
return metric
}
for _, tag := range tagsNotFound {
var v string
switch tag {
case "accountId":
v = doc.AccountID
case "architecture":
v = doc.Architecture
case "availabilityZone":
v = doc.AvailabilityZone
case "billingProducts":
v = strings.Join(doc.BillingProducts, ",")
case "imageId":
v = doc.ImageID
case "instanceId":
v = doc.InstanceID
case "instanceType":
v = doc.InstanceType
case "kernelId":
v = doc.KernelID
case "pendingTime":
v = doc.PendingTime.String()
case "privateIp":
v = doc.PrivateIP
case "ramdiskId":
v = doc.RamdiskID
case "region":
v = doc.Region
case "version":
v = doc.Version
default:
continue
}
metric.AddTag(tag, v)
expiration := int(time.Duration(r.CacheTTL).Seconds())
if err := r.tagCache.Set([]byte(tag), []byte(v), expiration); err != nil {
r.Log.Errorf("Error when setting IMDS tag cache value: %v", err)
continue
}
}
return metric
}
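// lookupMetadata attaches the configured instance-metadata paths as tags,
// consulting the shared cache before querying the IMDS endpoint.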
func (r *AwsEc2Processor) lookupMetadata(metric telegraf.Metric) telegraf.Metric {
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(r.Timeout))
defer cancel()
for _, path := range r.MetadataPaths {
key := strings.Trim(path, "/ ")
if r.CanonicalMetadataTags {
key = strings.ReplaceAll(key, "/", "_")
} else {
if idx := strings.LastIndex(key, "/"); idx > 0 {
key = key[idx+1:]
}
}
// Try to lookup the tag in cache
if value, err := r.tagCache.Get([]byte("metadata/" + path)); err == nil {
metric.AddTag(key, string(value))
continue
}
// Query the tag with the full path
resp, err := r.imdsClient.GetMetadata(ctx, &imds.GetMetadataInput{Path: path})
if err != nil {
r.Log.Errorf("Getting metadata %q failed: %v", path, err)
continue
}
value, err := io.ReadAll(resp.Content)
if err != nil {
r.Log.Errorf("Reading metadata reponse for %+v failed: %v", path, err)
continue
}
if len(value) > 0 {
metric.AddTag(key, string(value))
}
expiration := int(time.Duration(r.CacheTTL).Seconds())
if err = r.tagCache.Set([]byte("metadata/"+path), value, expiration); err != nil {
r.Log.Errorf("Updating metadata cache for %q failed: %v", path, err)
continue
}
}
return metric
}
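// lookupEC2Tags attaches the configured EC2 instance tags, issuing a single
// DescribeTags call for any keys missing from the cache.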
func (r *AwsEc2Processor) lookupEC2Tags(metric telegraf.Metric) telegraf.Metric {
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(r.Timeout))
defer cancel()
var tagsNotFound []string
for _, tag := range r.EC2Tags {
val, err := r.tagCache.Get([]byte(tag))
if err != nil {
tagsNotFound = append(tagsNotFound, tag)
} else {
metric.AddTag(tag, string(val))
}
}
if len(tagsNotFound) == 0 {
return metric
}
dto, err := r.ec2Client.DescribeTags(ctx, &ec2.DescribeTagsInput{
Filters: []types.Filter{
{
Name: aws.String("resource-id"),
Values: []string{r.instanceID},
},
{
Name: aws.String("key"),
Values: r.EC2Tags,
},
},
})
if err != nil {
r.Log.Errorf("Error during EC2 DescribeTags: %v", err)
return metric
}
for _, tag := range r.EC2Tags {
if v := getTagFromDescribeTags(dto, tag); v != "" {
metric.AddTag(tag, v)
expiration := int(time.Duration(r.CacheTTL).Seconds())
err = r.tagCache.Set([]byte(tag), []byte(v), expiration)
if err != nil {
r.Log.Errorf("Error when setting EC2Tags tag cache value: %v", err)
}
}
}
return metric
}
func (r *AwsEc2Processor) asyncAdd(metric telegraf.Metric) []telegraf.Metric {
// Add IMDS Instance Identity Document tags.
if len(r.ImdsTags) > 0 {
metric = r.lookupIMDSTags(metric)
}
// Add instance metadata tags.
if len(r.MetadataPaths) > 0 {
metric = r.lookupMetadata(metric)
}
// Add EC2 instance tags.
if len(r.EC2Tags) > 0 {
metric = r.lookupEC2Tags(metric)
}
return []telegraf.Metric{metric}
}
func init() {
processors.AddStreaming("aws_ec2", func() telegraf.StreamingProcessor {
return newAwsEc2Processor()
})
}
func newAwsEc2Processor() *AwsEc2Processor {
return &AwsEc2Processor{
MaxParallelCalls: DefaultMaxParallelCalls,
TagCacheSize: DefaultCacheSize,
Timeout: config.Duration(DefaultTimeout),
CacheTTL: config.Duration(DefaultCacheTTL),
}
}
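// getTagFromDescribeTags returns the value of the named tag from a
// DescribeTags response, or an empty string if the tag is not present.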
func getTagFromDescribeTags(o *ec2.DescribeTagsOutput, tag string) string {
for _, t := range o.Tags {
if *t.Key == tag {
return *t.Value
}
}
return ""
}

View file

@ -0,0 +1,184 @@
package aws_ec2
import (
"sync"
"testing"
"time"
"github.com/coocood/freecache"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/plugins/common/parallel"
"github.com/influxdata/telegraf/testutil"
)
func TestBasicStartup(t *testing.T) {
p := newAwsEc2Processor()
p.Log = &testutil.Logger{}
p.ImdsTags = []string{"accountId", "instanceId"}
acc := &testutil.Accumulator{}
require.NoError(t, p.Init())
require.Empty(t, acc.GetTelegrafMetrics())
require.Empty(t, acc.Errors)
}
func TestBasicStartupWithEC2Tags(t *testing.T) {
p := newAwsEc2Processor()
p.Log = &testutil.Logger{}
p.ImdsTags = []string{"accountId", "instanceId"}
p.EC2Tags = []string{"Name"}
acc := &testutil.Accumulator{}
require.NoError(t, p.Init())
require.Empty(t, acc.GetTelegrafMetrics())
require.Empty(t, acc.Errors)
}
func TestBasicStartupWithCacheTTL(t *testing.T) {
p := newAwsEc2Processor()
p.Log = &testutil.Logger{}
p.ImdsTags = []string{"accountId", "instanceId"}
p.CacheTTL = config.Duration(12 * time.Hour)
acc := &testutil.Accumulator{}
require.NoError(t, p.Init())
require.Empty(t, acc.GetTelegrafMetrics())
require.Empty(t, acc.Errors)
}
func TestBasicStartupWithTagCacheSize(t *testing.T) {
p := newAwsEc2Processor()
p.Log = &testutil.Logger{}
p.ImdsTags = []string{"accountId", "instanceId"}
p.TagCacheSize = 100
acc := &testutil.Accumulator{}
require.NoError(t, p.Init())
require.Empty(t, acc.GetTelegrafMetrics())
require.Empty(t, acc.Errors)
}
func TestBasicInitNoTagsReturnAnError(t *testing.T) {
p := newAwsEc2Processor()
p.Log = &testutil.Logger{}
err := p.Init()
require.Error(t, err)
}
func TestBasicInitInvalidTagsReturnAnError(t *testing.T) {
p := newAwsEc2Processor()
p.Log = &testutil.Logger{}
p.ImdsTags = []string{"dummy", "qwerty"}
err := p.Init()
require.Error(t, err)
}
func TestTracking(t *testing.T) {
// Setup raw input and expected output
inputRaw := []telegraf.Metric{
metric.New(
"m1",
map[string]string{
"metric_tag": "from_metric",
},
map[string]interface{}{"value": int64(1)},
time.Unix(0, 0),
),
metric.New(
"m2",
map[string]string{
"metric_tag": "foo_metric",
},
map[string]interface{}{"value": int64(2)},
time.Unix(0, 0),
),
}
expected := []telegraf.Metric{
metric.New(
"m1",
map[string]string{
"metric_tag": "from_metric",
"accountId": "123456789",
"instanceId": "i-123456789123",
},
map[string]interface{}{"value": int64(1)},
time.Unix(0, 0),
),
metric.New(
"m2",
map[string]string{
"metric_tag": "foo_metric",
"accountId": "123456789",
"instanceId": "i-123456789123",
},
map[string]interface{}{"value": int64(2)},
time.Unix(0, 0),
),
}
// Create fake notification for testing
var mu sync.Mutex
delivered := make([]telegraf.DeliveryInfo, 0, len(inputRaw))
notify := func(di telegraf.DeliveryInfo) {
mu.Lock()
defer mu.Unlock()
delivered = append(delivered, di)
}
// Convert raw input to tracking metric
input := make([]telegraf.Metric, 0, len(inputRaw))
for _, m := range inputRaw {
tm, _ := metric.WithTracking(m, notify)
input = append(input, tm)
}
// Prepare and start the plugin
plugin := &AwsEc2Processor{
MaxParallelCalls: DefaultMaxParallelCalls,
TagCacheSize: DefaultCacheSize,
Timeout: config.Duration(DefaultTimeout),
CacheTTL: config.Duration(DefaultCacheTTL),
ImdsTags: []string{"accountId", "instanceId"},
Log: &testutil.Logger{},
}
require.NoError(t, plugin.Init())
// Instead of starting the plugin which tries to connect to the remote
// service, we just fill the cache and start the minimum mechanics to
// process the metrics.
plugin.tagCache = freecache.NewCache(DefaultCacheSize)
require.NoError(t, plugin.tagCache.Set([]byte("accountId"), []byte("123456789"), -1))
require.NoError(t, plugin.tagCache.Set([]byte("instanceId"), []byte("i-123456789123"), -1))
var acc testutil.Accumulator
plugin.parallel = parallel.NewOrdered(&acc, plugin.asyncAdd, plugin.TagCacheSize, plugin.MaxParallelCalls)
// Schedule the metrics and wait until they are ready to perform the
// comparison
for _, in := range input {
require.NoError(t, plugin.Add(in, &acc))
}
require.Eventually(t, func() bool {
return int(acc.NMetrics()) >= len(expected)
}, 3*time.Second, 100*time.Millisecond)
actual := acc.GetTelegrafMetrics()
testutil.RequireMetricsEqual(t, expected, actual)
// Simulate output acknowledging delivery
for _, m := range actual {
m.Accept()
}
// Check delivery
require.Eventuallyf(t, func() bool {
mu.Lock()
defer mu.Unlock()
return len(input) == len(delivered)
}, time.Second, 100*time.Millisecond, "%d delivered but %d expected", len(delivered), len(expected))
}

View file

@ -0,0 +1,78 @@
# Attach AWS EC2 metadata to metrics
[[processors.aws_ec2]]
## Instance identity document tags to attach to metrics.
## For more information see:
## https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-identity-documents.html
##
## Available tags:
## * accountId
## * architecture
## * availabilityZone
## * billingProducts
## * imageId
## * instanceId
## * instanceType
## * kernelId
## * pendingTime
## * privateIp
## * ramdiskId
## * region
## * version
# imds_tags = []
## EC2 instance tags retrieved with DescribeTags action.
## If a tag is empty upon retrieval, it is omitted when tagging metrics.
## Note that for this to work, the role attached to the EC2 instance or the
## AWS credentials available from the environment must have a policy attached
## that allows ec2:DescribeTags.
##
## For more information see:
## https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeTags.html
# ec2_tags = []
## Paths to instance metadata information to attach to the metrics.
## Specify the full path without the base-path, e.g. `tags/instance/Name`.
##
## For more information see:
## https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html
# metadata_paths = []
## Convert metadata tag-names to canonical names representing the full path,
## with slashes ('/') replaced by underscores. By default, only the last
## path element is used to name the tag.
# canonical_metadata_tags = false
## Timeout for HTTP requests made against the AWS EC2 metadata endpoint.
# timeout = "10s"
## ordered controls whether the metrics need to stay in the order this
## plugin received them in. If false, metrics whose lookups hit cached
## results move through immediately instead of waiting on slower lookups,
## changing the order. If you depend on the order of metrics staying the
## same, set this to true. Keeping the metrics ordered may be slightly
## slower.
# ordered = false
## max_parallel_calls is the maximum number of AWS API calls to be in flight
## at the same time.
## It's probably best to keep this number fairly low.
# max_parallel_calls = 10
## cache_ttl determines how long each cached item will remain in the cache
## before it is removed and needs to be re-queried from the AWS API. By
## default, no items are cached.
# cache_ttl = "0s"
## tag_cache_size determines how many of the values which are found in imds_tags
## or ec2_tags will be kept in memory for faster lookup on successive processing
## of metrics. You may want to adjust this if you have excessively large numbers
## of tags on your EC2 instances, and you are using the ec2_tags field. This
## typically does not need to be changed when using the imds_tags field.
# tag_cache_size = 1000
## log_cache_stats will emit a log line periodically to stdout with details of
## cache entries, hits, misses, and evacuations since the last time stats were
## emitted. This can be helpful in determining whether caching is being effective
## in your environment. Stats are emitted every 30 seconds. By default, this
## setting is disabled.
# log_cache_stats = false

View file

@ -0,0 +1,60 @@
# Batch Processor Plugin
This processor groups metrics into batches by adding a batch tag. This is
useful for parallel processing of metrics where downstream processors,
aggregators or outputs can then select a batch using `tagpass` or `metricpass`.
Metrics are distributed across batches using the round-robin scheme.
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used to
modify metrics, tags, and fields, or create aliases and configure ordering, etc.
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
## Configuration
```toml @sample.conf
## Batch metrics into separate batches by adding a tag indicating the batch index.
[[processors.batch]]
## The name of the tag to use for adding the batch index
batch_tag = "my_batch"
## The number of batches to create
batches = 16
## Do not assign metrics with an existing batch assignment to a
## different batch.
# skip_existing = false
```
## Example
The example below uses these settings:
```toml
[[processors.batch]]
## The tag key to use for batching
batch_tag = "batch"
## The number of batches to create
batches = 3
```
```diff
- temperature cpu=25
- temperature cpu=50
- temperature cpu=75
- temperature cpu=25
- temperature cpu=50
- temperature cpu=75
+ temperature,batch=0 cpu=25
+ temperature,batch=1 cpu=50
+ temperature,batch=2 cpu=75
+ temperature,batch=0 cpu=25
+ temperature,batch=1 cpu=50
+ temperature,batch=2 cpu=75
```
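A batch can then be selected downstream with `tagpass`. A minimal sketch (the `file` output and path are illustrative):
```toml
[[outputs.file]]
files = ["/tmp/batch0.out"]
## Only accept metrics tagged with batch index 0
[outputs.file.tagpass]
batch = ["0"]
```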

View file

@ -0,0 +1,49 @@
package batch
import (
_ "embed"
"strconv"
"sync/atomic"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/processors"
)
//go:embed sample.conf
var sampleConfig string
type Batch struct {
BatchTag string `toml:"batch_tag"`
NumBatches uint64 `toml:"batches"`
SkipExisting bool `toml:"skip_existing"`
// the number of metrics that have been processed so far
count atomic.Uint64
}
func (*Batch) SampleConfig() string {
return sampleConfig
}
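// Apply assigns each metric to a batch by adding a tag with a round-robin
// index derived from an atomic counter, making it safe for concurrent use.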
func (b *Batch) Apply(in ...telegraf.Metric) []telegraf.Metric {
out := make([]telegraf.Metric, 0, len(in))
for _, m := range in {
if b.SkipExisting && m.HasTag(b.BatchTag) {
out = append(out, m)
continue
}
oldCount := b.count.Add(1) - 1
batchID := oldCount % b.NumBatches
m.AddTag(b.BatchTag, strconv.FormatUint(batchID, 10))
out = append(out, m)
}
return out
}
func init() {
processors.Add("batch", func() telegraf.Processor {
return &Batch{}
})
}

View file

@ -0,0 +1,112 @@
package batch
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
)
const batchTag = "?internal_batch_idx"
func Test_SingleMetricPutInBatch0(t *testing.T) {
b := &Batch{
BatchTag: batchTag,
NumBatches: 1,
}
m := testutil.MockMetricsWithValue(1)
expectedM := testutil.MockMetricsWithValue(1)
expectedM[0].AddTag(batchTag, "0")
res := b.Apply(m...)
testutil.RequireMetricsEqual(t, expectedM, res)
}
func Test_MetricsSmallerThanBatchSizeAreInDifferentBatches(t *testing.T) {
b := &Batch{
BatchTag: batchTag,
NumBatches: 3,
}
ms := make([]telegraf.Metric, 0, 2)
for range cap(ms) {
ms = append(ms, testutil.MockMetrics()...)
}
res := b.Apply(ms...)
batchTagValue, ok := res[0].GetTag(batchTag)
require.True(t, ok)
require.Equal(t, "0", batchTagValue)
batchTagValue, ok = res[1].GetTag(batchTag)
require.True(t, ok)
require.Equal(t, "1", batchTagValue)
}
func Test_MetricsEqualToBatchSizeInDifferentBatches(t *testing.T) {
b := &Batch{
BatchTag: batchTag,
NumBatches: 3,
}
ms := make([]telegraf.Metric, 0, 3)
for range cap(ms) {
ms = append(ms, testutil.MockMetrics()...)
}
res := b.Apply(ms...)
batchTagValue, ok := res[0].GetTag(batchTag)
require.True(t, ok)
require.Equal(t, "0", batchTagValue)
batchTagValue, ok = res[1].GetTag(batchTag)
require.True(t, ok)
require.Equal(t, "1", batchTagValue)
batchTagValue, ok = res[2].GetTag(batchTag)
require.True(t, ok)
require.Equal(t, "2", batchTagValue)
}
func Test_MetricsMoreThanBatchSizeInSameBatch(t *testing.T) {
b := &Batch{
BatchTag: batchTag,
NumBatches: 2,
}
ms := make([]telegraf.Metric, 0, 3)
for range cap(ms) {
ms = append(ms, testutil.MockMetrics()...)
}
res := b.Apply(ms...)
batchTagValue, ok := res[0].GetTag(batchTag)
require.True(t, ok)
require.Equal(t, "0", batchTagValue)
batchTagValue, ok = res[1].GetTag(batchTag)
require.True(t, ok)
require.Equal(t, "1", batchTagValue)
batchTagValue, ok = res[2].GetTag(batchTag)
require.True(t, ok)
require.Equal(t, "0", batchTagValue)
}
func Test_MetricWithExistingTagNotChanged(t *testing.T) {
b := &Batch{
BatchTag: batchTag,
NumBatches: 2,
SkipExisting: true,
}
m := testutil.MockMetricsWithValue(1)
m[0].AddTag(batchTag, "4")
res := b.Apply(m...)
tv, ok := res[0].GetTag(batchTag)
require.True(t, ok)
require.Equal(t, "4", tv)
}

View file

@ -0,0 +1,11 @@
## Batch metrics into separate batches by adding a tag indicating the batch index.
[[processors.batch]]
## The name of the tag to use for adding the batch index
batch_tag = "my_batch"
## The number of batches to create
batches = 16
## Do not assign metrics with an existing batch assignment to a
## different batch.
# skip_existing = false

View file

@ -0,0 +1,48 @@
# Clone Processor Plugin
The clone processor plugin creates a copy of each metric passing through it,
leaving the original metric untouched and allowing modifications to the
copied one.
The modifications allowed are the ones supported by input plugins and
aggregators:
* name_override
* name_prefix
* name_suffix
* tags
Select the metrics to modify using the standard [metric
filtering](../../../docs/CONFIGURATION.md#metric-filtering) options. Filtering
options apply to both the clone and the original.
Values of *name_override*, *name_prefix*, *name_suffix* and already present
*tags* with conflicting keys will be overwritten. Absent *tags* will be
created.
A typical use-case is gathering metrics once and cloning them to simulate
having several hosts (by modifying the ``host`` tag), as shown in the example
below.
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used to
modify metrics, tags, and fields, or create aliases and configure ordering, etc.
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
## Configuration
```toml @sample.conf
# Apply metric modifications using override semantics.
[[processors.clone]]
## All modifications on inputs and aggregators can be overridden:
# name_override = "new_name"
# name_prefix = "new_name_prefix"
# name_suffix = "new_name_suffix"
## Tags to be added (all values must be strings)
# [processors.clone.tags]
# additional_tag = "tag_value"
```
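For the host-simulation use-case mentioned above, a minimal sketch (the tag value is illustrative):
```toml
[[processors.clone]]
[processors.clone.tags]
host = "simulated-host"
```
```diff
- cpu,host=localhost time_idle=42
+ cpu,host=localhost time_idle=42
+ cpu,host=simulated-host time_idle=42
```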

View file

@ -0,0 +1,52 @@
//go:generate ../../../tools/readme_config_includer/generator
package clone
import (
_ "embed"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/processors"
)
//go:embed sample.conf
var sampleConfig string
type Clone struct {
NameOverride string
NamePrefix string
NameSuffix string
Tags map[string]string
}
func (*Clone) SampleConfig() string {
return sampleConfig
}
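// Apply returns a modified copy of each metric followed by the untouched originals.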
func (c *Clone) Apply(in ...telegraf.Metric) []telegraf.Metric {
out := make([]telegraf.Metric, 0, 2*len(in))
for _, original := range in {
m := original.Copy()
if len(c.NameOverride) > 0 {
m.SetName(c.NameOverride)
}
if len(c.NamePrefix) > 0 {
m.AddPrefix(c.NamePrefix)
}
if len(c.NameSuffix) > 0 {
m.AddSuffix(c.NameSuffix)
}
for key, value := range c.Tags {
m.AddTag(key, value)
}
out = append(out, m)
}
return append(out, in...)
}
func init() {
processors.Add("clone", func() telegraf.Processor {
return &Clone{}
})
}

View file

@ -0,0 +1,242 @@
package clone
import (
"sync"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/testutil"
)
func TestRetainsTags(t *testing.T) {
input := metric.New(
"m1",
map[string]string{"metric_tag": "from_metric"},
map[string]interface{}{"value": int64(1)},
time.Unix(0, 0),
)
expected := []telegraf.Metric{
metric.New(
"m1",
map[string]string{"metric_tag": "from_metric"},
map[string]interface{}{"value": int64(1)},
time.Unix(0, 0),
),
metric.New(
"m1",
map[string]string{"metric_tag": "from_metric"},
map[string]interface{}{"value": int64(1)},
time.Unix(0, 0),
),
}
plugin := &Clone{}
actual := plugin.Apply(input)
testutil.RequireMetricsEqual(t, expected, actual)
}
func TestAddTags(t *testing.T) {
input := metric.New(
"m1",
map[string]string{"metric_tag": "from_metric"},
map[string]interface{}{"value": int64(1)},
time.Unix(0, 0),
)
expected := []telegraf.Metric{
metric.New(
"m1",
map[string]string{
"metric_tag": "from_metric",
"added_tag": "from_config",
"another_tag": "",
},
map[string]interface{}{"value": int64(1)},
time.Unix(0, 0),
),
metric.New(
"m1",
map[string]string{"metric_tag": "from_metric"},
map[string]interface{}{"value": int64(1)},
time.Unix(0, 0),
),
}
plugin := &Clone{
Tags: map[string]string{
"added_tag": "from_config",
"another_tag": "",
},
}
actual := plugin.Apply(input)
testutil.RequireMetricsEqual(t, expected, actual)
}
func TestOverwritesPresentTagValues(t *testing.T) {
input := metric.New(
"m1",
map[string]string{"metric_tag": "from_metric"},
map[string]interface{}{"value": int64(1)},
time.Unix(0, 0),
)
expected := []telegraf.Metric{
metric.New(
"m1",
map[string]string{"metric_tag": "from_config"},
map[string]interface{}{"value": int64(1)},
time.Unix(0, 0),
),
metric.New(
"m1",
map[string]string{"metric_tag": "from_metric"},
map[string]interface{}{"value": int64(1)},
time.Unix(0, 0),
),
}
plugin := &Clone{
Tags: map[string]string{"metric_tag": "from_config"},
}
actual := plugin.Apply(input)
testutil.RequireMetricsEqual(t, expected, actual)
}
func TestOverridesName(t *testing.T) {
input := metric.New(
"m1",
map[string]string{"metric_tag": "from_metric"},
map[string]interface{}{"value": int64(1)},
time.Unix(0, 0),
)
expected := []telegraf.Metric{
metric.New(
"overridden",
map[string]string{"metric_tag": "from_metric"},
map[string]interface{}{"value": int64(1)},
time.Unix(0, 0),
),
metric.New(
"m1",
map[string]string{"metric_tag": "from_metric"},
map[string]interface{}{"value": int64(1)},
time.Unix(0, 0),
),
}
plugin := &Clone{NameOverride: "overridden"}
actual := plugin.Apply(input)
testutil.RequireMetricsEqual(t, expected, actual)
}
func TestNamePrefix(t *testing.T) {
input := metric.New(
"m1",
map[string]string{"metric_tag": "from_metric"},
map[string]interface{}{"value": int64(1)},
time.Unix(0, 0),
)
expected := []telegraf.Metric{
metric.New(
"Pre-m1",
map[string]string{"metric_tag": "from_metric"},
map[string]interface{}{"value": int64(1)},
time.Unix(0, 0),
),
metric.New(
"m1",
map[string]string{"metric_tag": "from_metric"},
map[string]interface{}{"value": int64(1)},
time.Unix(0, 0),
),
}
plugin := &Clone{NamePrefix: "Pre-"}
actual := plugin.Apply(input)
testutil.RequireMetricsEqual(t, expected, actual)
}
func TestNameSuffix(t *testing.T) {
input := metric.New(
"m1",
map[string]string{"metric_tag": "from_metric"},
map[string]interface{}{"value": int64(1)},
time.Unix(0, 0),
)
expected := []telegraf.Metric{
metric.New(
"m1-suff",
map[string]string{"metric_tag": "from_metric"},
map[string]interface{}{"value": int64(1)},
time.Unix(0, 0),
),
metric.New(
"m1",
map[string]string{"metric_tag": "from_metric"},
map[string]interface{}{"value": int64(1)},
time.Unix(0, 0),
),
}
plugin := &Clone{NameSuffix: "-suff"}
actual := plugin.Apply(input)
testutil.RequireMetricsEqual(t, expected, actual)
}
func TestTracking(t *testing.T) {
inputRaw := []telegraf.Metric{
metric.New(
"m1",
map[string]string{"metric_tag": "from_metric"},
map[string]interface{}{"value": int64(1)},
time.Now(),
),
metric.New(
"m2",
map[string]string{"metric_tag": "foo_metric"},
map[string]interface{}{"value": int64(2)},
time.Now(),
),
}
var mu sync.Mutex
delivered := make([]telegraf.DeliveryInfo, 0, len(inputRaw))
notify := func(di telegraf.DeliveryInfo) {
mu.Lock()
defer mu.Unlock()
delivered = append(delivered, di)
}
input := make([]telegraf.Metric, 0, len(inputRaw))
expected := make([]telegraf.Metric, 0, 2*len(input))
for _, m := range inputRaw {
tm, _ := metric.WithTracking(m, notify)
input = append(input, tm)
expected = append(expected, m)
}
expected = append(expected, input...)
// Process expected metrics and compare with resulting metrics
plugin := &Clone{}
actual := plugin.Apply(input...)
testutil.RequireMetricsEqual(t, expected, actual)
// Simulate output acknowledging delivery
for _, m := range actual {
m.Accept()
}
// Check delivery
require.Eventuallyf(t, func() bool {
mu.Lock()
defer mu.Unlock()
return len(input) == len(delivered)
}, time.Second, 100*time.Millisecond, "%d delivered but %d expected", len(delivered), len(expected))
}

View file

@ -0,0 +1,10 @@
# Apply metric modifications using override semantics.
[[processors.clone]]
## All modifications on inputs and aggregators can be overridden:
# name_override = "new_name"
# name_prefix = "new_name_prefix"
# name_suffix = "new_name_suffix"
## Tags to be added (all values must be strings)
# [processors.clone.tags]
# additional_tag = "tag_value"

View file

@ -0,0 +1,139 @@
# Converter Processor Plugin
The converter processor is used to change the type of tag or field values. In
addition to changing field types it can convert between fields and tags.
Values that cannot be converted are dropped.
**Note:** When converting tags to fields, take care to ensure the series is
still uniquely identifiable. Fields with the same series key (measurement +
tags) will overwrite one another.
**Note on large strings being converted to numeric types:** When converting a
string value to a numeric type, precision may be lost if the number is too
large. The largest numeric type this plugin supports is `float64`, and if a
string 'number' exceeds its size limit, accuracy may be lost.
**Note on multiple measurements or timestamps:** Users can provide multiple
tags or fields to use as the measurement name or timestamp. However, note that
the order in the array is not guaranteed!
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used to
modify metrics, tags, and fields, or create aliases and configure ordering, etc.
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
## Configuration
```toml @sample.conf
# Convert values to another metric value type
[[processors.converter]]
## Tags to convert
##
## The table key determines the target type, and the array of key-values
## select the keys to convert. The array may contain globs.
## <target-type> = [<tag-key>...]
[processors.converter.tags]
measurement = []
string = []
integer = []
unsigned = []
boolean = []
float = []
## Optional tag to use as metric timestamp
# timestamp = []
## Format of the timestamp determined by the tag above. This can be any of
## "unix", "unix_ms", "unix_us", "unix_ns", or a valid Golang time format.
## It is required when using the timestamp option.
# timestamp_format = ""
## Fields to convert
##
## The table key determines the target type, and the array of key-values
## select the keys to convert. The array may contain globs.
## <target-type> = [<field-key>...]
[processors.converter.fields]
measurement = []
tag = []
string = []
integer = []
unsigned = []
boolean = []
float = []
## Optional field to use for converting base64 encoding of IEEE 754 Float32 values
## e.g. data_json_content_state_openconfig-platform-psu:output-power":"RKeAAA=="
## into a float32 value 1340
# base64_ieee_float32 = []
## Optional field to use as metric timestamp
# timestamp = []
## Format of the timestamp determined by the field above. This can be any
## of "unix", "unix_ms", "unix_us", "unix_ns", or a valid Golang time
## format. It is required when using the timestamp option.
# timestamp_format = ""
```
### Examples
Convert `port` tag to a string field:
```toml
[[processors.converter]]
[processors.converter.tags]
string = ["port"]
```
```diff
- apache,port=80,server=debian-stretch-apache BusyWorkers=1,BytesPerReq=0
+ apache,server=debian-stretch-apache port="80",BusyWorkers=1,BytesPerReq=0
```
Convert all `scboard_*` fields to an integer:
```toml
[[processors.converter]]
[processors.converter.fields]
integer = ["scboard_*"]
```
```diff
- apache scboard_closing=0,scboard_dnslookup=0,scboard_finishing=0,scboard_idle_cleanup=0,scboard_keepalive=0,scboard_logging=0,scboard_open=100,scboard_reading=0,scboard_sending=1,scboard_starting=0,scboard_waiting=49
+ apache scboard_closing=0i,scboard_dnslookup=0i,scboard_finishing=0i,scboard_idle_cleanup=0i,scboard_keepalive=0i,scboard_logging=0i,scboard_open=100i,scboard_reading=0i,scboard_sending=1i,scboard_starting=0i,scboard_waiting=49i
```
Rename the measurement from a tag value:
```toml
[[processors.converter]]
[processors.converter.tags]
measurement = ["topic"]
```
```diff
- mqtt_consumer,topic=sensor temp=42
+ sensor temp=42
```
Set the metric timestamp from a tag:
```toml
[[processors.converter]]
[processors.converter.tags]
timestamp = ["time"]
timestamp_format = "unix
```
```diff
- metric,time="1677610769" temp=42
+ metric temp=42 1677610769
```
This is also possible via the fields converter.
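Convert a base64-encoded IEEE 754 float32 field; a sketch using an illustrative field name (`RKeAAA==` decodes to 1340, as noted in the option description above):
```toml
[[processors.converter]]
[processors.converter.fields]
base64_ieee_float32 = ["output-power"]
```
```diff
- psu output-power="RKeAAA=="
+ psu output-power=1340
```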

View file

@ -0,0 +1,396 @@
//go:generate ../../../tools/readme_config_includer/generator
package converter
import (
_ "embed"
"encoding/base64"
"errors"
"fmt"
"math"
"math/big"
"strconv"
"strings"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/filter"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/processors"
)
//go:embed sample.conf
var sampleConfig string
type Conversion struct {
Measurement []string `toml:"measurement"`
Tag []string `toml:"tag"`
String []string `toml:"string"`
Integer []string `toml:"integer"`
Unsigned []string `toml:"unsigned"`
Boolean []string `toml:"boolean"`
Float []string `toml:"float"`
Timestamp []string `toml:"timestamp"`
TimestampFormat string `toml:"timestamp_format"`
Base64IEEEFloat32 []string `toml:"base64_ieee_float32"`
}
type Converter struct {
Tags *Conversion `toml:"tags"`
Fields *Conversion `toml:"fields"`
Log telegraf.Logger `toml:"-"`
tagConversions *ConversionFilter
fieldConversions *ConversionFilter
}
type ConversionFilter struct {
Measurement filter.Filter
Tag filter.Filter
String filter.Filter
Integer filter.Filter
Unsigned filter.Filter
Boolean filter.Filter
Float filter.Filter
Timestamp filter.Filter
Base64IEEEFloat32 filter.Filter
}
func (*Converter) SampleConfig() string {
return sampleConfig
}
func (p *Converter) Init() error {
return p.compile()
}
func (p *Converter) Apply(metrics ...telegraf.Metric) []telegraf.Metric {
for _, metric := range metrics {
p.convertTags(metric)
p.convertFields(metric)
}
return metrics
}
func (p *Converter) compile() error {
tf, err := compileFilter(p.Tags)
if err != nil {
return err
}
ff, err := compileFilter(p.Fields)
if err != nil {
return err
}
if tf == nil && ff == nil {
return errors.New("no filters found")
}
p.tagConversions = tf
p.fieldConversions = ff
return nil
}
func compileFilter(conv *Conversion) (*ConversionFilter, error) {
if conv == nil {
return nil, nil
}
var err error
cf := &ConversionFilter{}
cf.Measurement, err = filter.Compile(conv.Measurement)
if err != nil {
return nil, err
}
cf.Tag, err = filter.Compile(conv.Tag)
if err != nil {
return nil, err
}
cf.String, err = filter.Compile(conv.String)
if err != nil {
return nil, err
}
cf.Integer, err = filter.Compile(conv.Integer)
if err != nil {
return nil, err
}
cf.Unsigned, err = filter.Compile(conv.Unsigned)
if err != nil {
return nil, err
}
cf.Boolean, err = filter.Compile(conv.Boolean)
if err != nil {
return nil, err
}
cf.Float, err = filter.Compile(conv.Float)
if err != nil {
return nil, err
}
cf.Timestamp, err = filter.Compile(conv.Timestamp)
if err != nil {
return nil, err
}
cf.Base64IEEEFloat32, err = filter.Compile(conv.Base64IEEEFloat32)
if err != nil {
return nil, err
}
return cf, nil
}
// convertTags converts tags into the measurement name, fields, or the metric timestamp.
func (p *Converter) convertTags(metric telegraf.Metric) {
if p.tagConversions == nil {
return
}
for key, value := range metric.Tags() {
switch {
case p.tagConversions.Measurement != nil && p.tagConversions.Measurement.Match(key):
metric.SetName(value)
case p.tagConversions.String != nil && p.tagConversions.String.Match(key):
metric.AddField(key, value)
case p.tagConversions.Integer != nil && p.tagConversions.Integer.Match(key):
if v, err := toInteger(value); err != nil {
p.Log.Errorf("Converting to integer [%T] failed: %v", value, err)
} else {
metric.AddField(key, v)
}
case p.tagConversions.Unsigned != nil && p.tagConversions.Unsigned.Match(key):
if v, err := toUnsigned(value); err != nil {
p.Log.Errorf("Converting to unsigned [%T] failed: %v", value, err)
} else {
metric.AddField(key, v)
}
case p.tagConversions.Boolean != nil && p.tagConversions.Boolean.Match(key):
if v, err := internal.ToBool(value); err != nil {
p.Log.Errorf("Converting to boolean [%T] failed: %v", value, err)
} else {
metric.AddField(key, v)
}
case p.tagConversions.Float != nil && p.tagConversions.Float.Match(key):
if v, err := toFloat(value); err != nil {
p.Log.Errorf("Converting to float [%T] failed: %v", value, err)
} else {
metric.AddField(key, v)
}
case p.tagConversions.Timestamp != nil && p.tagConversions.Timestamp.Match(key):
time, err := internal.ParseTimestamp(p.Tags.TimestampFormat, value, nil)
if err != nil {
p.Log.Errorf("Converting to timestamp [%T] failed: %v", value, err)
continue
}
metric.SetTime(time)
default:
continue
}
metric.RemoveTag(key)
}
}
// convertFields converts fields into the measurement name, tags, the metric timestamp, or other field types.
func (p *Converter) convertFields(metric telegraf.Metric) {
if p.fieldConversions == nil {
return
}
for key, value := range metric.Fields() {
switch {
case p.fieldConversions.Measurement != nil && p.fieldConversions.Measurement.Match(key):
if v, err := internal.ToString(value); err != nil {
p.Log.Errorf("Converting to measurement [%T] failed: %v", value, err)
} else {
metric.SetName(v)
}
metric.RemoveField(key)
case p.fieldConversions.Tag != nil && p.fieldConversions.Tag.Match(key):
if v, err := internal.ToString(value); err != nil {
p.Log.Errorf("Converting to tag [%T] failed: %v", value, err)
} else {
metric.AddTag(key, v)
}
metric.RemoveField(key)
case p.fieldConversions.Float != nil && p.fieldConversions.Float.Match(key):
if v, err := toFloat(value); err != nil {
p.Log.Errorf("Converting to float [%T] failed: %v", value, err)
metric.RemoveField(key)
} else {
metric.AddField(key, v)
}
case p.fieldConversions.Integer != nil && p.fieldConversions.Integer.Match(key):
if v, err := toInteger(value); err != nil {
p.Log.Errorf("Converting to integer [%T] failed: %v", value, err)
metric.RemoveField(key)
} else {
metric.AddField(key, v)
}
case p.fieldConversions.Unsigned != nil && p.fieldConversions.Unsigned.Match(key):
if v, err := toUnsigned(value); err != nil {
p.Log.Errorf("Converting to unsigned [%T] failed: %v", value, err)
metric.RemoveField(key)
} else {
metric.AddField(key, v)
}
case p.fieldConversions.Boolean != nil && p.fieldConversions.Boolean.Match(key):
if v, err := internal.ToBool(value); err != nil {
p.Log.Errorf("Converting to bool [%T] failed: %v", value, err)
metric.RemoveField(key)
} else {
metric.AddField(key, v)
}
case p.fieldConversions.String != nil && p.fieldConversions.String.Match(key):
if v, err := internal.ToString(value); err != nil {
p.Log.Errorf("Converting to string [%T] failed: %v", value, err)
metric.RemoveField(key)
} else {
metric.AddField(key, v)
}
case p.fieldConversions.Timestamp != nil && p.fieldConversions.Timestamp.Match(key):
if time, err := internal.ParseTimestamp(p.Fields.TimestampFormat, value, nil); err != nil {
p.Log.Errorf("Converting to timestamp [%T] failed: %v", value, err)
} else {
metric.SetTime(time)
metric.RemoveField(key)
}
case p.fieldConversions.Base64IEEEFloat32 != nil && p.fieldConversions.Base64IEEEFloat32.Match(key):
str, ok := value.(string)
if !ok {
p.Log.Errorf("Converting to base64_ieee_float32 [%T] failed: value is not a string", value)
metric.RemoveField(key)
} else if v, err := base64ToFloat32(str); err != nil {
p.Log.Errorf("Converting to base64_ieee_float32 [%T] failed: %v", value, err)
metric.RemoveField(key)
} else {
metric.AddField(key, v)
}
}
}
}
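// toInteger converts a value to int64, rounding floats to the nearest integer
// and clamping out-of-range values to math.MinInt64 or math.MaxInt64.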
func toInteger(v interface{}) (int64, error) {
switch value := v.(type) {
case float32:
if value < float32(math.MinInt64) {
return math.MinInt64, nil
}
if value > float32(math.MaxInt64) {
return math.MaxInt64, nil
}
return int64(math.Round(float64(value))), nil
case float64:
if value < float64(math.MinInt64) {
return math.MinInt64, nil
}
if value > float64(math.MaxInt64) {
return math.MaxInt64, nil
}
return int64(math.Round(value)), nil
default:
if v, err := internal.ToInt64(value); err == nil {
return v, nil
}
v, err := internal.ToFloat64(value)
if err != nil {
return 0, err
}
if v < float64(math.MinInt64) {
return math.MinInt64, nil
}
if v > float64(math.MaxInt64) {
return math.MaxInt64, nil
}
return int64(math.Round(v)), nil
}
}
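// toUnsigned converts a value to uint64, rounding floats and clamping negative
// values to 0 and overflows to math.MaxUint64.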
func toUnsigned(v interface{}) (uint64, error) {
switch value := v.(type) {
case float32:
if value < 0 {
return 0, nil
}
if value > float32(math.MaxUint64) {
return math.MaxUint64, nil
}
return uint64(math.Round(float64(value))), nil
case float64:
if value < 0 {
return 0, nil
}
if value > float64(math.MaxUint64) {
return math.MaxUint64, nil
}
return uint64(math.Round(value)), nil
default:
if v, err := internal.ToUint64(value); err == nil {
return v, nil
}
v, err := internal.ToFloat64(value)
if err != nil {
return 0, err
}
if v < 0 {
return 0, nil
}
if v > float64(math.MaxUint64) {
return math.MaxUint64, nil
}
return uint64(math.Round(v)), nil
}
}
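// toFloat converts a value to float64, additionally accepting hexadecimal
// strings prefixed with "0x" via arbitrary-precision parsing.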
func toFloat(v interface{}) (float64, error) {
if v, ok := v.(string); ok && strings.HasPrefix(v, "0x") {
var i big.Int
if _, success := i.SetString(v, 0); !success {
return 0, errors.New("unable to parse string to big int")
}
var f big.Float
f.SetInt(&i)
result, _ := f.Float64()
return result, nil
}
return internal.ToFloat64(v)
}
func base64ToFloat32(encoded string) (float32, error) {
// Decode the Base64 string to bytes
decodedBytes, err := base64.StdEncoding.DecodeString(encoded)
if err != nil {
return 0, err
}
// Check if the byte length matches a float32 (4 bytes)
if len(decodedBytes) != 4 {
return 0, errors.New("decoded byte length is not 4 bytes")
}
// Convert the bytes to a string representation as per IEEE 754 of the bits
bitsStrRepresentation := fmt.Sprintf("%08b%08b%08b%08b", decodedBytes[0], decodedBytes[1], decodedBytes[2], decodedBytes[3])
// Convert the bits to a uint32
bits, err := strconv.ParseUint(bitsStrRepresentation, 2, 32)
if err != nil {
return 0, err
}
// Convert the uint32 (bits) to a float32 based on IEEE 754 binary representation
return math.Float32frombits(uint32(bits)), nil
}
func init() {
processors.Add("converter", func() telegraf.Processor {
return &Converter{}
})
}

View file

@ -0,0 +1,869 @@
package converter
import (
"math"
"sync"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/testutil"
)
func TestConverter(t *testing.T) {
tests := []struct {
name string
converter *Converter
input telegraf.Metric
expected []telegraf.Metric
}{
{
name: "from tag",
converter: &Converter{
Tags: &Conversion{
String: []string{"string"},
Integer: []string{"int"},
Unsigned: []string{"uint"},
Boolean: []string{"bool"},
Float: []string{"float"},
Tag: []string{"tag"},
},
},
input: testutil.MustMetric(
"cpu",
map[string]string{
"float": "42",
"int": "42",
"uint": "42",
"bool": "true",
"string": "howdy",
"tag": "tag",
},
map[string]interface{}{},
time.Unix(0, 0),
),
expected: []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{
"tag": "tag",
},
map[string]interface{}{
"float": 42.0,
"int": int64(42),
"uint": uint64(42),
"bool": true,
"string": "howdy",
},
time.Unix(0, 0),
),
},
},
{
name: "from tag unconvertible",
converter: &Converter{
Tags: &Conversion{
Integer: []string{"int"},
Unsigned: []string{"uint"},
Boolean: []string{"bool"},
Float: []string{"float"},
},
},
input: testutil.MustMetric(
"cpu",
map[string]string{
"float": "a",
"int": "b",
"uint": "c",
"bool": "maybe",
},
map[string]interface{}{},
time.Unix(0, 0),
),
expected: []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{},
time.Unix(0, 0),
),
},
},
{
name: "from string field",
converter: &Converter{
Fields: &Conversion{
String: []string{"a"},
Integer: []string{"b", "b1", "b2", "b3"},
Unsigned: []string{"c", "c1", "c2", "c3"},
Boolean: []string{"d"},
Float: []string{"e", "g"},
Tag: []string{"f"},
},
},
input: testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{
"a": "howdy",
"b": "42",
"b1": "42.2",
"b2": "42.5",
"b3": "0x2A",
"c": "42",
"c1": "42.2",
"c2": "42.5",
"c3": "0x2A",
"d": "true",
"e": "42.0",
"f": "foo",
"g": "foo",
},
time.Unix(0, 0),
),
expected: []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{
"f": "foo",
},
map[string]interface{}{
"a": "howdy",
"b": int64(42),
"b1": int64(42),
"b2": int64(43),
"b3": int64(42),
"c": uint64(42),
"c1": uint64(42),
"c2": uint64(43),
"c3": uint64(42),
"d": true,
"e": 42.0,
},
time.Unix(0, 0),
),
},
},
{
name: "from string field unconvertible",
converter: &Converter{
Fields: &Conversion{
Integer: []string{"a"},
Unsigned: []string{"b"},
Boolean: []string{"c"},
Float: []string{"d"},
},
},
input: testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{
"a": "a",
"b": "b",
"c": "c",
"d": "d",
},
time.Unix(0, 0),
),
expected: []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{},
time.Unix(0, 0),
),
},
},
{
name: "from integer field",
converter: &Converter{
Fields: &Conversion{
String: []string{"a"},
Integer: []string{"b"},
Unsigned: []string{"c", "negative_uint"},
Boolean: []string{"d", "bool_zero"},
Float: []string{"e"},
Tag: []string{"f"},
},
},
input: testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{
"a": int64(42),
"b": int64(42),
"c": int64(42),
"d": int64(42),
"e": int64(42),
"f": int64(42),
"negative_uint": int64(-42),
"bool_zero": int64(0),
},
time.Unix(0, 0),
),
expected: []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{
"f": "42",
},
map[string]interface{}{
"a": "42",
"b": int64(42),
"c": uint64(42),
"d": true,
"e": 42.0,
"negative_uint": uint64(0),
"bool_zero": false,
},
time.Unix(0, 0),
),
},
},
{
name: "from unsigned field",
converter: &Converter{
Fields: &Conversion{
String: []string{"a"},
Integer: []string{"b", "overflow_int"},
Unsigned: []string{"c"},
Boolean: []string{"d", "bool_zero"},
Float: []string{"e"},
Tag: []string{"f"},
},
},
input: testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{
"a": uint64(42),
"b": uint64(42),
"c": uint64(42),
"d": uint64(42),
"e": uint64(42),
"f": uint64(42),
"overflow_int": uint64(math.MaxUint64),
"bool_zero": uint64(0),
},
time.Unix(0, 0),
),
expected: []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{
"f": "42",
},
map[string]interface{}{
"a": "42",
"b": int64(42),
"c": uint64(42),
"d": true,
"e": 42.0,
"overflow_int": int64(math.MaxInt64),
"bool_zero": false,
},
time.Unix(0, 0),
),
},
},
{
name: "out of range for unsigned",
converter: &Converter{
Fields: &Conversion{
Unsigned: []string{"a", "b"},
},
},
input: testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{
"a": int64(-42),
"b": math.MaxFloat64,
},
time.Unix(0, 0),
),
expected: []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{
"a": uint64(0),
"b": uint64(math.MaxUint64),
},
time.Unix(0, 0),
),
},
},
{
name: "boolean field",
converter: &Converter{
Fields: &Conversion{
String: []string{"a", "af"},
Integer: []string{"b", "bf"},
Unsigned: []string{"c", "cf"},
Boolean: []string{"d", "df"},
Float: []string{"e", "ef"},
Tag: []string{"f", "ff"},
},
},
input: testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{
"a": true,
"b": true,
"c": true,
"d": true,
"e": true,
"f": true,
"af": false,
"bf": false,
"cf": false,
"df": false,
"ef": false,
"ff": false,
},
time.Unix(0, 0),
),
expected: []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{
"f": "true",
"ff": "false",
},
map[string]interface{}{
"a": "true",
"af": "false",
"b": int64(1),
"bf": int64(0),
"c": uint64(1),
"cf": uint64(0),
"d": true,
"df": false,
"e": 1.0,
"ef": 0.0,
},
time.Unix(0, 0),
),
},
},
{
name: "from float field",
converter: &Converter{
Fields: &Conversion{
String: []string{"a"},
Integer: []string{"b", "too_large_int", "too_small_int"},
Unsigned: []string{"c", "negative_uint", "too_large_uint", "too_small_uint"},
Boolean: []string{"d", "bool_zero"},
Float: []string{"e"},
Tag: []string{"f"},
},
},
input: testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{
"a": 42.0,
"b": 42.0,
"c": 42.0,
"d": 42.0,
"e": 42.0,
"f": 42.0,
"too_large_int": math.MaxFloat64,
"too_large_uint": math.MaxFloat64,
"too_small_int": -math.MaxFloat64,
"too_small_uint": -math.MaxFloat64,
"negative_uint": -42.0,
"bool_zero": 0.0,
},
time.Unix(0, 0),
),
expected: []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{
"f": "42",
},
map[string]interface{}{
"a": "42",
"b": int64(42),
"c": uint64(42),
"d": true,
"e": 42.0,
"too_large_int": int64(math.MaxInt64),
"too_large_uint": uint64(math.MaxUint64),
"too_small_int": int64(math.MinInt64),
"too_small_uint": uint64(0),
"negative_uint": uint64(0),
"bool_zero": false,
},
time.Unix(0, 0),
),
},
},
{
name: "globbing",
converter: &Converter{
Fields: &Conversion{
Integer: []string{"int_*"},
},
},
input: testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{
"int_a": "1",
"int_b": "2",
"float_a": 1.0,
},
time.Unix(0, 0),
),
expected: []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{
"int_a": int64(1),
"int_b": int64(2),
"float_a": 1.0,
},
time.Unix(0, 0),
),
},
},
{
name: "from string field hexadecimal",
converter: &Converter{
Fields: &Conversion{
Integer: []string{"a"},
Unsigned: []string{"b"},
Float: []string{"c"},
},
},
input: testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{
"a": "0x11826c",
"b": "0x11826c",
"c": "0x2139d19bb1c580ebe0",
},
time.Unix(0, 0),
),
expected: []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{
"a": int64(1147500),
"b": uint64(1147500),
"c": float64(612908836750534700000),
},
time.Unix(0, 0),
),
},
},
{
name: "from unix timestamp field",
converter: &Converter{
Fields: &Conversion{
Timestamp: []string{"time"},
TimestampFormat: "unix",
},
},
input: testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{
"a": 42.0,
"time": 1111111111,
},
time.Unix(0, 0),
),
expected: []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{
"a": 42.0,
},
time.Unix(1111111111, 0),
),
},
},
{
name: "from unix timestamp tag",
converter: &Converter{
Tags: &Conversion{
Timestamp: []string{"time"},
TimestampFormat: "unix",
},
},
input: testutil.MustMetric(
"cpu",
map[string]string{
"time": "1677610769",
},
map[string]interface{}{
"a": 41.0,
},
time.Unix(0, 0),
),
expected: []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{
"a": 41.0,
},
time.Unix(1677610769, 0),
),
},
},
{
name: "from invalid timestamp tag",
converter: &Converter{
Tags: &Conversion{
Timestamp: []string{"time"},
TimestampFormat: "blah",
},
},
input: testutil.MustMetric(
"cpu",
map[string]string{
"time": "1677610769",
},
map[string]interface{}{
"a": 41.0,
},
time.Unix(0, 0),
),
expected: []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{
"time": "1677610769",
},
map[string]interface{}{
"a": 41.0,
},
time.Unix(0, 0),
),
},
},
{
name: "from rfc3339 timestamp field",
converter: &Converter{
Fields: &Conversion{
Timestamp: []string{"time"},
TimestampFormat: "rfc3339",
},
},
input: testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{
"a": 42.0,
"time": "2009-02-13T23:31:30Z",
},
time.Unix(0, 0),
),
expected: []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{
"a": 42.0,
},
time.Unix(1234567890, 0),
),
},
},
{
name: "from custom timestamp field",
converter: &Converter{
Fields: &Conversion{
Timestamp: []string{"time"},
TimestampFormat: "2006-01-02 15:04:05 MST",
},
},
input: testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{
"a": 42.0,
"time": "2016-03-01 02:39:59 MST",
},
time.Unix(0, 0),
),
expected: []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{
"a": 42.0,
},
time.Unix(1456825199, 0),
),
},
},
{
name: "invalid timestamp format",
converter: &Converter{
Fields: &Conversion{
Timestamp: []string{"time"},
TimestampFormat: "2006-01-0",
},
},
input: testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{
"a": 42.0,
"time": "2022-07-04 01:30:59 MST",
},
time.Unix(0, 0),
),
expected: []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{
"a": 42.0,
"time": "2022-07-04 01:30:59 MST",
},
time.Unix(0, 0),
),
},
},
{
name: "no timestamp format",
converter: &Converter{
Fields: &Conversion{
Timestamp: []string{"time"},
},
},
input: testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{
"a": 42.0,
"time": "2022-07-04 01:30:59 MST",
},
time.Unix(0, 0),
),
expected: []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{
"a": 42.0,
"time": "2022-07-04 01:30:59 MST",
},
time.Unix(0, 0),
),
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tt.converter.Log = testutil.Logger{}
require.NoError(t, tt.converter.Init())
actual := tt.converter.Apply(tt.input)
testutil.RequireMetricsEqual(t, tt.expected, actual)
})
}
}
func TestMultipleTimestamps(t *testing.T) {
c := &Converter{
Fields: &Conversion{
Timestamp: []string{"time", "date"},
TimestampFormat: "2006-01-02 15:04:05 MST",
},
Log: testutil.Logger{},
}
require.NoError(t, c.Init())
input := testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{
"a": 42.0,
"time": "1990-01-01 12:45:13 EST",
"date": "2016-03-01 02:39:59 MST",
},
time.Unix(0, 0),
)
result := c.Apply(input)
require.Len(t, result, 1)
require.Empty(t, result[0].TagList())
require.Len(t, result[0].FieldList(), 1)
}
func TestMeasurement(t *testing.T) {
tests := []struct {
name string
converter *Converter
input telegraf.Metric
expected []telegraf.Metric
}{
{
name: "measurement from tag",
converter: &Converter{
Tags: &Conversion{
Measurement: []string{"filepath"},
},
},
input: testutil.MustMetric(
"file",
map[string]string{
"filepath": "/var/log/syslog",
},
map[string]interface{}{
"msg": "howdy",
},
time.Unix(0, 0),
),
expected: []telegraf.Metric{
testutil.MustMetric(
"/var/log/syslog",
map[string]string{},
map[string]interface{}{
"msg": "howdy",
},
time.Unix(0, 0),
),
},
},
{
name: "measurement from field",
converter: &Converter{
Fields: &Conversion{
Measurement: []string{"topic"},
},
},
input: testutil.MustMetric(
"file",
map[string]string{},
map[string]interface{}{
"v": 1,
"topic": "telegraf",
},
time.Unix(0, 0),
),
expected: []telegraf.Metric{
testutil.MustMetric(
"telegraf",
map[string]string{},
map[string]interface{}{
"v": 1,
},
time.Unix(0, 0),
),
},
},
{
name: "float32 from ieee754 float32 encoded as base64",
converter: &Converter{
Fields: &Conversion{
Base64IEEEFloat32: []string{"a", "b"},
},
},
input: testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{
"a": "QlAAAA==",
"b": "QlgAAA==",
},
time.Unix(0, 0),
),
expected: []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{
"a": float32(52),
"b": float32(54),
},
time.Unix(0, 0),
),
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tt.converter.Log = testutil.Logger{}
require.NoError(t, tt.converter.Init())
actual := tt.converter.Apply(tt.input)
testutil.RequireMetricsEqual(t, tt.expected, actual)
})
}
}
func TestEmptyConfigInitError(t *testing.T) {
converter := &Converter{
Log: testutil.Logger{},
}
require.Error(t, converter.Init())
}
func TestTracking(t *testing.T) {
inputRaw := []telegraf.Metric{
metric.New("foo", map[string]string{}, map[string]interface{}{"value": 42, "topic": "telegraf"}, time.Unix(0, 0)),
metric.New("bar", map[string]string{}, map[string]interface{}{"value": 42, "topic": "telegraf"}, time.Unix(0, 0)),
metric.New("baz", map[string]string{}, map[string]interface{}{"value": 42, "topic": "telegraf"}, time.Unix(0, 0)),
}
var mu sync.Mutex
delivered := make([]telegraf.DeliveryInfo, 0, len(inputRaw))
notify := func(di telegraf.DeliveryInfo) {
mu.Lock()
defer mu.Unlock()
delivered = append(delivered, di)
}
input := make([]telegraf.Metric, 0, len(inputRaw))
for _, m := range inputRaw {
tm, _ := metric.WithTracking(m, notify)
input = append(input, tm)
}
expected := []telegraf.Metric{
metric.New("telegraf", map[string]string{}, map[string]interface{}{"value": 42}, time.Unix(0, 0)),
metric.New("telegraf", map[string]string{}, map[string]interface{}{"value": 42}, time.Unix(0, 0)),
metric.New("telegraf", map[string]string{}, map[string]interface{}{"value": 42}, time.Unix(0, 0)),
}
plugin := &Converter{
Fields: &Conversion{
Measurement: []string{"topic"},
},
}
require.NoError(t, plugin.Init())
// Process expected metrics and compare with resulting metrics
actual := plugin.Apply(input...)
testutil.RequireMetricsEqual(t, expected, actual)
// Simulate output acknowledging delivery
for _, m := range actual {
m.Accept()
}
// Check delivery
require.Eventuallyf(t, func() bool {
mu.Lock()
defer mu.Unlock()
return len(input) == len(delivered)
}, time.Second, 100*time.Millisecond, "%d delivered but %d expected", len(delivered), len(expected))
}

View file

@ -0,0 +1,49 @@
# Convert values to another metric value type
[[processors.converter]]
## Tags to convert
##
## The table key determines the target type, and the array of key-values
## selects the keys to convert. The array may contain globs.
## <target-type> = [<tag-key>...]
[processors.converter.tags]
measurement = []
string = []
integer = []
unsigned = []
boolean = []
float = []
## Optional tag to use as metric timestamp
# timestamp = []
## Format of the timestamp determined by the tag above. This can be any of
## "unix", "unix_ms", "unix_us", "unix_ns", or a valid Golang time format.
## It is required when using the timestamp option.
# timestamp_format = ""
## Fields to convert
##
## The table key determines the target type, and the array of key-values
## selects the keys to convert. The array may contain globs.
## <target-type> = [<field-key>...]
[processors.converter.fields]
measurement = []
tag = []
string = []
integer = []
unsigned = []
boolean = []
float = []
## Optional fields containing base64-encoded IEEE 754 float32 values,
## e.g. "openconfig-platform-psu:output-power": "RKeAAA=="
## will be decoded into the float32 value 1340.0
# base64_ieee_float32 = []
## Optional field to use as metric timestamp
# timestamp = []
## Format of the timestamp determined by the field above. This can be any
## of "unix", "unix_ms", "unix_us", "unix_ns", or a valid Golang time
## format. It is required when using the timestamp option.
# timestamp_format = ""

View file

@ -0,0 +1,68 @@
# Date Processor Plugin
Use the `date` processor to add the metric timestamp as a human-readable tag.
A common use is to add a tag that can be used to group by month or year.
A few example use cases include:
1) consumption data for utilities on a per-month basis
2) bandwidth capacity per month
3) comparing energy production or sales on a yearly or monthly basis
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used to
modify metrics, tags, and fields or create aliases and configure ordering, etc.
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
## Configuration
```toml @sample.conf
# Add the metric timestamp as a tag or field to metrics passing through this filter.
[[processors.date]]
## New tag to create
tag_key = "month"
## New field to create (cannot set both field_key and tag_key)
# field_key = "month"
## Date format string, must be a representation of the Go "reference time"
## which is "Mon Jan 2 15:04:05 -0700 MST 2006".
date_format = "Jan"
## If destination is a field, date format can also be one of
## "unix", "unix_ms", "unix_us", or "unix_ns", which will insert an integer field.
# date_format = "unix"
## Offset duration added to the metric timestamp before writing the new tag or field.
# date_offset = "0s"
## Timezone to use when creating the tag or field using a reference time
## string. This can be set to one of "UTC", "Local", or to a location name
## in the IANA Time Zone database.
## example: timezone = "America/Los_Angeles"
# timezone = "UTC"
```
### timezone
On Windows, only the `Local` and `UTC` zones are available by default. To use
other timezones, set the `ZONEINFO` environment variable to the location of
[`zoneinfo.zip`][zoneinfo]:
```text
set ZONEINFO=C:\zoneinfo.zip
```
## Example
```diff
- throughput lower=10i,upper=1000i,mean=500i 1560540094000000000
+ throughput,month=Jun lower=10i,upper=1000i,mean=500i 1560540094000000000
```
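When writing to a field instead of a tag, a configuration along the lines of
`field_key = "unix_ts"` with `date_format = "unix"` (the field name is only an
example) would produce:
```diff
- throughput lower=10i,upper=1000i,mean=500i 1560540094000000000
+ throughput lower=10i,upper=1000i,mean=500i,unix_ts=1560540094i 1560540094000000000
```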
[zoneinfo]: https://github.com/golang/go/raw/50bd1c4d4eb4fac8ddeb5f063c099daccfb71b26/lib/time/zoneinfo.zip

View file

@ -0,0 +1,77 @@
//go:generate ../../../tools/readme_config_includer/generator
package date
import (
_ "embed"
"errors"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/plugins/processors"
)
//go:embed sample.conf
var sampleConfig string
const defaultTimezone = "UTC"
type Date struct {
TagKey string `toml:"tag_key"`
FieldKey string `toml:"field_key"`
DateFormat string `toml:"date_format"`
DateOffset config.Duration `toml:"date_offset"`
Timezone string `toml:"timezone"`
location *time.Location
}
func (*Date) SampleConfig() string {
return sampleConfig
}
func (d *Date) Init() error {
// Check either TagKey or FieldKey specified
if len(d.FieldKey) > 0 && len(d.TagKey) > 0 {
return errors.New("field_key and tag_key cannot be specified at the same time")
} else if len(d.FieldKey) == 0 && len(d.TagKey) == 0 {
return errors.New("at least one of field_key or tag_key must be specified")
}
// LoadLocation returns UTC if timezone is the empty string.
var err error
d.location, err = time.LoadLocation(d.Timezone)
return err
}
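// Apply formats each metric's timestamp, with the configured timezone and
// offset applied, and writes the result to the configured tag or field.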
func (d *Date) Apply(in ...telegraf.Metric) []telegraf.Metric {
for _, point := range in {
tm := point.Time().In(d.location).Add(time.Duration(d.DateOffset))
if len(d.TagKey) > 0 {
point.AddTag(d.TagKey, tm.Format(d.DateFormat))
} else if len(d.FieldKey) > 0 {
switch d.DateFormat {
case "unix":
point.AddField(d.FieldKey, tm.Unix())
case "unix_ms":
point.AddField(d.FieldKey, tm.UnixNano()/1000000)
case "unix_us":
point.AddField(d.FieldKey, tm.UnixNano()/1000)
case "unix_ns":
point.AddField(d.FieldKey, tm.UnixNano())
default:
point.AddField(d.FieldKey, tm.Format(d.DateFormat))
}
}
}
return in
}
func init() {
processors.Add("date", func() telegraf.Processor {
return &Date{
Timezone: defaultTimezone,
}
})
}

View file

@ -0,0 +1,294 @@
package date
import (
"sync"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/testutil"
)
func TestTagAndField(t *testing.T) {
plugin := &Date{
TagKey: "month",
FieldKey: "month",
}
require.Error(t, plugin.Init())
}
func TestNoOutputSpecified(t *testing.T) {
plugin := &Date{}
require.Error(t, plugin.Init())
}
func TestMonthTag(t *testing.T) {
now := time.Now()
month := now.Format("Jan")
input := []telegraf.Metric{
metric.New("foo", map[string]string{}, map[string]interface{}{"value": 42}, now),
metric.New("bar", map[string]string{}, map[string]interface{}{"value": 42}, now),
metric.New("baz", map[string]string{}, map[string]interface{}{"value": 42}, now),
}
expected := []telegraf.Metric{
metric.New("foo", map[string]string{"month": month}, map[string]interface{}{"value": 42}, now),
metric.New("bar", map[string]string{"month": month}, map[string]interface{}{"value": 42}, now),
metric.New("baz", map[string]string{"month": month}, map[string]interface{}{"value": 42}, now),
}
plugin := &Date{
TagKey: "month",
DateFormat: "Jan",
}
require.NoError(t, plugin.Init())
actual := plugin.Apply(input...)
testutil.RequireMetricsEqual(t, expected, actual)
}
func TestMonthField(t *testing.T) {
now := time.Now()
month := now.Format("Jan")
input := []telegraf.Metric{
metric.New("foo", map[string]string{}, map[string]interface{}{"value": 42}, now),
metric.New("bar", map[string]string{}, map[string]interface{}{"value": 42}, now),
metric.New("baz", map[string]string{}, map[string]interface{}{"value": 42}, now),
}
expected := []telegraf.Metric{
metric.New("foo", map[string]string{}, map[string]interface{}{"value": 42, "month": month}, now),
metric.New("bar", map[string]string{}, map[string]interface{}{"value": 42, "month": month}, now),
metric.New("baz", map[string]string{}, map[string]interface{}{"value": 42, "month": month}, now),
}
plugin := &Date{
FieldKey: "month",
DateFormat: "Jan",
}
require.NoError(t, plugin.Init())
actual := plugin.Apply(input...)
testutil.RequireMetricsEqual(t, expected, actual)
}
func TestOldDateTag(t *testing.T) {
now := time.Date(1993, 05, 27, 0, 0, 0, 0, time.UTC)
input := []telegraf.Metric{
metric.New("foo", map[string]string{}, map[string]interface{}{"value": 42}, now),
metric.New("bar", map[string]string{}, map[string]interface{}{"value": 42}, now),
metric.New("baz", map[string]string{}, map[string]interface{}{"value": 42}, now),
}
expected := []telegraf.Metric{
metric.New("foo", map[string]string{"year": "1993"}, map[string]interface{}{"value": 42}, now),
metric.New("bar", map[string]string{"year": "1993"}, map[string]interface{}{"value": 42}, now),
metric.New("baz", map[string]string{"year": "1993"}, map[string]interface{}{"value": 42}, now),
}
plugin := &Date{
TagKey: "year",
DateFormat: "2006",
}
require.NoError(t, plugin.Init())
actual := plugin.Apply(input...)
testutil.RequireMetricsEqual(t, expected, actual)
}
func TestFieldUnix(t *testing.T) {
now := time.Now()
ts := now.Unix()
input := []telegraf.Metric{
metric.New("foo", map[string]string{}, map[string]interface{}{"value": 42}, now),
metric.New("bar", map[string]string{}, map[string]interface{}{"value": 42}, now),
metric.New("baz", map[string]string{}, map[string]interface{}{"value": 42}, now),
}
expected := []telegraf.Metric{
metric.New("foo", map[string]string{}, map[string]interface{}{"value": 42, "unix": ts}, now),
metric.New("bar", map[string]string{}, map[string]interface{}{"value": 42, "unix": ts}, now),
metric.New("baz", map[string]string{}, map[string]interface{}{"value": 42, "unix": ts}, now),
}
plugin := &Date{
FieldKey: "unix",
DateFormat: "unix",
}
require.NoError(t, plugin.Init())
actual := plugin.Apply(input...)
testutil.RequireMetricsEqual(t, expected, actual)
}
func TestFieldUnixNano(t *testing.T) {
now := time.Now()
ts := now.UnixNano()
input := []telegraf.Metric{
metric.New("foo", map[string]string{}, map[string]interface{}{"value": 42}, now),
metric.New("bar", map[string]string{}, map[string]interface{}{"value": 42}, now),
metric.New("baz", map[string]string{}, map[string]interface{}{"value": 42}, now),
}
expected := []telegraf.Metric{
metric.New("foo", map[string]string{}, map[string]interface{}{"value": 42, "unix_ns": ts}, now),
metric.New("bar", map[string]string{}, map[string]interface{}{"value": 42, "unix_ns": ts}, now),
metric.New("baz", map[string]string{}, map[string]interface{}{"value": 42, "unix_ns": ts}, now),
}
plugin := &Date{
FieldKey: "unix_ns",
DateFormat: "unix_ns",
}
require.NoError(t, plugin.Init())
actual := plugin.Apply(input...)
testutil.RequireMetricsEqual(t, expected, actual)
}
func TestFieldUnixMillis(t *testing.T) {
now := time.Now()
ts := now.UnixMilli()
input := []telegraf.Metric{
metric.New("foo", map[string]string{}, map[string]interface{}{"value": 42}, now),
metric.New("bar", map[string]string{}, map[string]interface{}{"value": 42}, now),
metric.New("baz", map[string]string{}, map[string]interface{}{"value": 42}, now),
}
expected := []telegraf.Metric{
metric.New("foo", map[string]string{}, map[string]interface{}{"value": 42, "unix_ms": ts}, now),
metric.New("bar", map[string]string{}, map[string]interface{}{"value": 42, "unix_ms": ts}, now),
metric.New("baz", map[string]string{}, map[string]interface{}{"value": 42, "unix_ms": ts}, now),
}
plugin := &Date{
FieldKey: "unix_ms",
DateFormat: "unix_ms",
}
require.NoError(t, plugin.Init())
actual := plugin.Apply(input...)
testutil.RequireMetricsEqual(t, expected, actual)
}
func TestFieldUnixMicros(t *testing.T) {
now := time.Now()
ts := now.UnixMicro()
input := []telegraf.Metric{
metric.New("foo", map[string]string{}, map[string]interface{}{"value": 42}, now),
metric.New("bar", map[string]string{}, map[string]interface{}{"value": 42}, now),
metric.New("baz", map[string]string{}, map[string]interface{}{"value": 42}, now),
}
expected := []telegraf.Metric{
metric.New("foo", map[string]string{}, map[string]interface{}{"value": 42, "unix_us": ts}, now),
metric.New("bar", map[string]string{}, map[string]interface{}{"value": 42, "unix_us": ts}, now),
metric.New("baz", map[string]string{}, map[string]interface{}{"value": 42, "unix_us": ts}, now),
}
plugin := &Date{
FieldKey: "unix_us",
DateFormat: "unix_us",
}
require.NoError(t, plugin.Init())
actual := plugin.Apply(input...)
testutil.RequireMetricsEqual(t, expected, actual)
}
func TestDateOffset(t *testing.T) {
plugin := &Date{
TagKey: "hour",
DateFormat: "15",
DateOffset: config.Duration(2 * time.Hour),
}
require.NoError(t, plugin.Init())
input := testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{
"time_idle": 42.0,
},
time.Unix(1578603600, 0),
)
expected := []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{
"hour": "23",
},
map[string]interface{}{
"time_idle": 42.0,
},
time.Unix(1578603600, 0),
),
}
actual := plugin.Apply(input)
testutil.RequireMetricsEqual(t, expected, actual)
}
func TestTracking(t *testing.T) {
now := time.Now()
ts := now.UnixMicro()
inputRaw := []telegraf.Metric{
metric.New("foo", map[string]string{}, map[string]interface{}{"value": 42}, now),
metric.New("bar", map[string]string{}, map[string]interface{}{"value": 42}, now),
metric.New("baz", map[string]string{}, map[string]interface{}{"value": 42}, now),
}
var mu sync.Mutex
delivered := make([]telegraf.DeliveryInfo, 0, len(inputRaw))
notify := func(di telegraf.DeliveryInfo) {
mu.Lock()
defer mu.Unlock()
delivered = append(delivered, di)
}
input := make([]telegraf.Metric, 0, len(inputRaw))
expected := make([]telegraf.Metric, 0, len(input))
for _, m := range inputRaw {
tm, _ := metric.WithTracking(m, notify)
input = append(input, tm)
em := m.Copy()
em.AddField("unix_us", ts)
expected = append(expected, em)
}
plugin := &Date{
FieldKey: "unix_us",
DateFormat: "unix_us",
}
require.NoError(t, plugin.Init())
// Process expected metrics and compare with resulting metrics
actual := plugin.Apply(input...)
testutil.RequireMetricsEqual(t, expected, actual)
// Simulate output acknowledging delivery
for _, m := range actual {
m.Accept()
}
// Check delivery
require.Eventuallyf(t, func() bool {
mu.Lock()
defer mu.Unlock()
return len(input) == len(delivered)
}, time.Second, 100*time.Millisecond, "%d delivered but %d expected", len(delivered), len(expected))
}

View file

@ -0,0 +1,24 @@
# Add the metric timestamp as a tag or field to metrics passing through this filter.
[[processors.date]]
## New tag to create
tag_key = "month"
## New field to create (cannot set both field_key and tag_key)
# field_key = "month"
## Date format string, must be a representation of the Go "reference time"
## which is "Mon Jan 2 15:04:05 -0700 MST 2006".
date_format = "Jan"
## If destination is a field, date format can also be one of
## "unix", "unix_ms", "unix_us", or "unix_ns", which will insert an integer field.
# date_format = "unix"
## Offset duration added to the metric timestamp before writing the new tag or field.
# date_offset = "0s"
## Timezone to use when creating the tag or field using a reference time
## string. This can be set to one of "UTC", "Local", or to a location name
## in the IANA Time Zone database.
## example: timezone = "America/Los_Angeles"
# timezone = "UTC"

View file

@ -0,0 +1,36 @@
# Dedup Processor Plugin
Filter metrics whose field values are exact repetitions of the previous values.
This plugin will store its state between runs if the `statefile` option in the
agent config section is set.
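For reference, a minimal sketch of the relevant agent option (the path below is
only an example):
```toml
[agent]
## Location for Telegraf to persist internal plugin state between runs
statefile = "/var/lib/telegraf/telegraf.state"
```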
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used to
modify metrics, tags, and fields or create aliases and configure ordering, etc.
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
## Configuration
```toml @sample.conf
# Filter metrics with repeating field values
[[processors.dedup]]
## Maximum time to suppress output
dedup_interval = "600s"
```
## Example
```diff
- cpu,cpu=cpu0 time_idle=42i,time_guest=1i
- cpu,cpu=cpu0 time_idle=42i,time_guest=2i
- cpu,cpu=cpu0 time_idle=42i,time_guest=2i
- cpu,cpu=cpu0 time_idle=44i,time_guest=2i
- cpu,cpu=cpu0 time_idle=44i,time_guest=2i
+ cpu,cpu=cpu0 time_idle=42i,time_guest=1i
+ cpu,cpu=cpu0 time_idle=42i,time_guest=2i
+ cpu,cpu=cpu0 time_idle=44i,time_guest=2i
```

View file

@ -0,0 +1,161 @@
//go:generate ../../../tools/readme_config_includer/generator
package dedup
import (
_ "embed"
"fmt"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/plugins/parsers/influx"
"github.com/influxdata/telegraf/plugins/processors"
serializers_influx "github.com/influxdata/telegraf/plugins/serializers/influx"
)
//go:embed sample.conf
var sampleConfig string
type Dedup struct {
DedupInterval config.Duration `toml:"dedup_interval"`
FlushTime time.Time
Cache map[uint64]telegraf.Metric
Log telegraf.Logger `toml:"-"`
}
// Remove expired items from cache
func (d *Dedup) cleanup() {
// No need to clean up the cache too often; let's save some CPU
if time.Since(d.FlushTime) < time.Duration(d.DedupInterval) {
return
}
d.FlushTime = time.Now()
keep := make(map[uint64]telegraf.Metric)
for id, metric := range d.Cache {
if time.Since(metric.Time()) < time.Duration(d.DedupInterval) {
keep[id] = metric
}
}
d.Cache = keep
}
// Save item to cache
func (d *Dedup) save(metric telegraf.Metric, id uint64) {
d.Cache[id] = metric.Copy()
d.Cache[id].Accept()
}
func (*Dedup) SampleConfig() string {
return sampleConfig
}
// Apply deduplicates the incoming metrics, dropping field values that repeat within the dedup interval
func (d *Dedup) Apply(metrics ...telegraf.Metric) []telegraf.Metric {
idx := 0
for _, metric := range metrics {
id := metric.HashID()
m, ok := d.Cache[id]
// If not in cache then just save it
if !ok {
d.save(metric, id)
metrics[idx] = metric
idx++
continue
}
// If cache item has expired then refresh it
if time.Since(m.Time()) >= time.Duration(d.DedupInterval) {
d.save(metric, id)
metrics[idx] = metric
idx++
continue
}
// For each field compare value with the cached one
changed := false
added := false
sametime := metric.Time() == m.Time()
for _, f := range metric.FieldList() {
if value, ok := m.GetField(f.Key); ok {
if value != f.Value {
changed = true
break
}
} else if sametime {
// This field isn't in the cached metric but it's the
// same series and timestamp. Merge it into the cached
// metric.
// Metrics have a ValueType that applies to all values
// in the metric. If an input needs to produce values
// with different ValueTypes but the same timestamp,
// they have to produce multiple metrics. (See the
// system input for an example.) In this case, dedup
// ignores the ValueTypes of the metrics and merges
// the fields into one metric for the dup check.
m.AddField(f.Key, f.Value)
added = true
}
}
// If any field value has changed then refresh the cache
if changed {
d.save(metric, id)
metrics[idx] = metric
idx++
continue
}
if sametime && added {
metrics[idx] = metric
idx++
continue
}
// In any other case remove metric from the output
metric.Drop()
}
metrics = metrics[:idx]
d.cleanup()
return metrics
}
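// GetState serializes the cache content to influx line protocol so the agent
// can persist it between runs.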
func (d *Dedup) GetState() interface{} {
s := &serializers_influx.Serializer{}
v := make([]telegraf.Metric, 0, len(d.Cache))
for _, value := range d.Cache {
v = append(v, value)
}
state, err := s.SerializeBatch(v)
if err != nil {
d.Log.Errorf("dedup processor failed to serialize metric batch: %v", err)
}
return state
}
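// SetState restores the cache by parsing the persisted line-protocol state and
// replaying the metrics through Apply.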
func (d *Dedup) SetState(state interface{}) error {
p := &influx.Parser{}
if err := p.Init(); err != nil {
return err
}
data, ok := state.([]byte)
if !ok {
return fmt.Errorf("state has wrong type %T", state)
}
metrics, err := p.Parse(data)
if err != nil {
return err
}
d.Apply(metrics...)
return nil
}
func init() {
processors.Add("dedup", func() telegraf.Processor {
return &Dedup{
DedupInterval: config.Duration(10 * time.Minute),
FlushTime: time.Now(),
Cache: make(map[uint64]telegraf.Metric),
}
})
}

View file

@ -0,0 +1,532 @@
package dedup
import (
"fmt"
"sync"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/testutil"
)
func TestMetrics(t *testing.T) {
now := time.Now()
tests := []struct {
name string
input []telegraf.Metric
expected []telegraf.Metric
cacheContent []telegraf.Metric
}{
{
name: "retain metric",
input: []telegraf.Metric{
metric.New("m1",
map[string]string{"tag": "tag_value"},
map[string]interface{}{"value": 1},
now,
),
},
expected: []telegraf.Metric{
metric.New("m1",
map[string]string{"tag": "tag_value"},
map[string]interface{}{"value": 1},
now,
),
},
cacheContent: []telegraf.Metric{
metric.New("m1",
map[string]string{"tag": "tag_value"},
map[string]interface{}{"value": 1},
now,
),
},
},
{
name: "suppress repeated metric",
input: []telegraf.Metric{
metric.New(
"m1",
map[string]string{"tag": "tag_value"},
map[string]interface{}{"value": 1},
now.Add(-1*time.Second),
),
metric.New(
"m1",
map[string]string{"tag": "tag_value"},
map[string]interface{}{"value": 1},
now,
),
},
expected: []telegraf.Metric{
metric.New(
"m1",
map[string]string{"tag": "tag_value"},
map[string]interface{}{"value": 1},
now.Add(-1*time.Second),
),
},
cacheContent: []telegraf.Metric{
metric.New(
"m1",
map[string]string{"tag": "tag_value"},
map[string]interface{}{"value": 1},
now.Add(-1*time.Second),
),
metric.New(
"m1",
map[string]string{"tag": "tag_value"},
map[string]interface{}{"value": 1},
now.Add(-1*time.Second),
),
},
},
{
name: "pass updated metric",
input: []telegraf.Metric{
metric.New(
"m1",
map[string]string{"tag": "tag_value"},
map[string]interface{}{"value": 1},
now.Add(-1*time.Second),
),
metric.New(
"m1",
map[string]string{"tag": "tag_value"},
map[string]interface{}{"value": 2},
now,
),
},
expected: []telegraf.Metric{
metric.New(
"m1",
map[string]string{"tag": "tag_value"},
map[string]interface{}{"value": 1},
now.Add(-1*time.Second),
),
metric.New(
"m1",
map[string]string{"tag": "tag_value"},
map[string]interface{}{"value": 2},
now,
),
},
cacheContent: []telegraf.Metric{
metric.New(
"m1",
map[string]string{"tag": "tag_value"},
map[string]interface{}{"value": 1},
now.Add(-1*time.Second),
),
metric.New(
"m1",
map[string]string{"tag": "tag_value"},
map[string]interface{}{"value": 2},
now,
),
},
},
{
name: "pass after cache expired",
input: []telegraf.Metric{
metric.New(
"m1",
map[string]string{"tag": "tag_value"},
map[string]interface{}{"value": 1},
now.Add(-1*time.Hour),
),
metric.New(
"m1",
map[string]string{"tag": "tag_value"},
map[string]interface{}{"value": 1},
now,
),
},
expected: []telegraf.Metric{
metric.New(
"m1",
map[string]string{"tag": "tag_value"},
map[string]interface{}{"value": 1},
now.Add(-1*time.Hour),
),
metric.New(
"m1",
map[string]string{"tag": "tag_value"},
map[string]interface{}{"value": 1},
now,
),
},
cacheContent: []telegraf.Metric{
metric.New(
"m1",
map[string]string{"tag": "tag_value"},
map[string]interface{}{"value": 1},
now.Add(-1*time.Hour),
),
metric.New(
"m1",
map[string]string{"tag": "tag_value"},
map[string]interface{}{"value": 1},
now,
),
},
},
{
name: "cache retains metrics",
input: []telegraf.Metric{
metric.New(
"m1",
map[string]string{"tag": "tag_value"},
map[string]interface{}{"value": 1},
now.Add(-3*time.Hour),
),
metric.New(
"m1",
map[string]string{"tag": "tag_value"},
map[string]interface{}{"value": 1},
now.Add(-2*time.Hour),
),
metric.New(
"m1",
map[string]string{"tag": "tag_value"},
map[string]interface{}{"value": 1},
now,
),
},
expected: []telegraf.Metric{
metric.New(
"m1",
map[string]string{"tag": "tag_value"},
map[string]interface{}{"value": 1},
now.Add(-3*time.Hour),
),
metric.New(
"m1",
map[string]string{"tag": "tag_value"},
map[string]interface{}{"value": 1},
now.Add(-2*time.Hour),
),
metric.New(
"m1",
map[string]string{"tag": "tag_value"},
map[string]interface{}{"value": 1},
now,
),
},
cacheContent: []telegraf.Metric{
metric.New(
"m1",
map[string]string{"tag": "tag_value"},
map[string]interface{}{"value": 1},
now.Add(-3*time.Hour),
),
metric.New(
"m1",
map[string]string{"tag": "tag_value"},
map[string]interface{}{"value": 1},
now.Add(-2*time.Hour),
),
metric.New(
"m1",
map[string]string{"tag": "tag_value"},
map[string]interface{}{"value": 1},
now,
),
},
},
{
name: "same timestamp",
input: []telegraf.Metric{
metric.New("metric",
map[string]string{"tag": "value"},
map[string]interface{}{"foo": 1}, // field
now,
),
metric.New(
"metric",
map[string]string{"tag": "value"},
map[string]interface{}{"bar": 1}, // different field
now,
),
metric.New(
"metric",
map[string]string{"tag": "value"},
map[string]interface{}{"bar": 2}, // same field different value
now,
),
metric.New(
"metric",
map[string]string{"tag": "value"},
map[string]interface{}{"bar": 2}, // same field same value
now,
),
},
expected: []telegraf.Metric{
metric.New(
"metric",
map[string]string{"tag": "value"},
map[string]interface{}{"foo": 1},
now,
),
metric.New(
"metric",
map[string]string{"tag": "value"},
map[string]interface{}{"bar": 1},
now,
),
metric.New(
"metric",
map[string]string{"tag": "value"},
map[string]interface{}{"bar": 2},
now,
),
},
cacheContent: []telegraf.Metric{
metric.New("metric",
map[string]string{"tag": "value"},
map[string]interface{}{"foo": 1},
now,
),
metric.New(
"metric",
map[string]string{"tag": "value"},
map[string]interface{}{"foo": 1, "bar": 1},
now,
),
metric.New(
"metric",
map[string]string{"tag": "value"},
map[string]interface{}{"bar": 2},
now,
),
metric.New(
"metric",
map[string]string{"tag": "value"},
map[string]interface{}{"bar": 2},
now,
),
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Create plugin instance
plugin := &Dedup{
DedupInterval: config.Duration(10 * time.Minute),
FlushTime: now.Add(-1 * time.Second),
Cache: make(map[uint64]telegraf.Metric),
}
// Feed the input metrics and record the outputs
var actual []telegraf.Metric
for i, m := range tt.input {
actual = append(actual, plugin.Apply(m)...)
// Check the cache content
if cm := tt.cacheContent[i]; cm == nil {
require.Empty(t, plugin.Cache)
} else {
id := m.HashID()
require.NotEmpty(t, plugin.Cache)
require.Contains(t, plugin.Cache, id)
testutil.RequireMetricEqual(t, cm, plugin.Cache[id])
}
}
// Check if we got the expected metrics
testutil.RequireMetricsEqual(t, tt.expected, actual)
})
}
}
func TestCacheShrink(t *testing.T) {
now := time.Now()
// Time offset is more than 2 * DedupInterval
plugin := &Dedup{
DedupInterval: config.Duration(10 * time.Minute),
FlushTime: now.Add(-2 * time.Hour),
Cache: make(map[uint64]telegraf.Metric),
}
// Time offset is more than 1 * DedupInterval
input := []telegraf.Metric{
metric.New(
"m1",
map[string]string{"tag": "tag_value"},
map[string]interface{}{"value": 1},
now.Add(-1*time.Hour),
),
}
actual := plugin.Apply(input...)
expected := input
testutil.RequireMetricsEqual(t, expected, actual)
require.Empty(t, plugin.Cache)
}
func TestTracking(t *testing.T) {
now := time.Now()
inputRaw := []telegraf.Metric{
metric.New("metric",
map[string]string{"tag": "value"},
map[string]interface{}{"foo": 1},
now.Add(-2*time.Second),
),
metric.New("metric",
map[string]string{"tag": "pass"},
map[string]interface{}{"foo": 1},
now.Add(-2*time.Second),
),
metric.New("metric",
map[string]string{"tag": "value"},
map[string]interface{}{"foo": 1},
now.Add(-1*time.Second),
),
metric.New("metric",
map[string]string{"tag": "pass"},
map[string]interface{}{"foo": 1},
now.Add(-1*time.Second),
),
metric.New(
"metric",
map[string]string{"tag": "value"},
map[string]interface{}{"foo": 3},
now,
),
}
var mu sync.Mutex
delivered := make([]telegraf.DeliveryInfo, 0, len(inputRaw))
notify := func(di telegraf.DeliveryInfo) {
mu.Lock()
defer mu.Unlock()
delivered = append(delivered, di)
}
input := make([]telegraf.Metric, 0, len(inputRaw))
for _, m := range inputRaw {
tm, _ := metric.WithTracking(m, notify)
input = append(input, tm)
}
expected := []telegraf.Metric{
metric.New("metric",
map[string]string{"tag": "value"},
map[string]interface{}{"foo": 1},
now.Add(-2*time.Second),
),
metric.New("metric",
map[string]string{"tag": "pass"},
map[string]interface{}{"foo": 1},
now.Add(-2*time.Second),
),
metric.New(
"metric",
map[string]string{"tag": "value"},
map[string]interface{}{"foo": 3},
now,
),
}
// Create plugin instance
plugin := &Dedup{
DedupInterval: config.Duration(10 * time.Minute),
FlushTime: now.Add(-1 * time.Second),
Cache: make(map[uint64]telegraf.Metric),
}
// Process expected metrics and compare with resulting metrics
actual := plugin.Apply(input...)
testutil.RequireMetricsEqual(t, expected, actual)
// Simulate output acknowledging delivery
for _, m := range actual {
m.Accept()
}
// Check delivery
require.Eventuallyf(t, func() bool {
mu.Lock()
defer mu.Unlock()
return len(input) == len(delivered)
}, time.Second, 100*time.Millisecond, "%d delivered but %d expected", len(delivered), len(expected))
}
func TestStatePersistence(t *testing.T) {
now := time.Now()
// Define the metrics and states
state := fmt.Sprintf("metric,tag=value foo=1i %d\n", now.Add(-1*time.Minute).UnixNano())
input := []telegraf.Metric{
metric.New("metric",
map[string]string{"tag": "value"},
map[string]interface{}{"foo": 1},
now.Add(-2*time.Second),
),
metric.New("metric",
map[string]string{"tag": "pass"},
map[string]interface{}{"foo": 1},
now.Add(-1*time.Second),
),
metric.New(
"metric",
map[string]string{"tag": "value"},
map[string]interface{}{"foo": 3},
now,
),
}
expected := []telegraf.Metric{
metric.New("metric",
map[string]string{"tag": "pass"},
map[string]interface{}{"foo": 1},
now.Add(-1*time.Second),
),
metric.New(
"metric",
map[string]string{"tag": "value"},
map[string]interface{}{"foo": 3},
now,
),
}
expectedState := []string{
fmt.Sprintf("metric,tag=pass foo=1i %d\n", now.Add(-1*time.Second).UnixNano()),
fmt.Sprintf("metric,tag=value foo=3i %d\n", now.UnixNano()),
}
// Configure the plugin
plugin := &Dedup{
DedupInterval: config.Duration(10 * time.Hour), // use a long interval to avoid flaky tests
FlushTime: now.Add(-1 * time.Second),
Cache: make(map[uint64]telegraf.Metric),
}
require.Empty(t, plugin.Cache)
// Setup the "persisted" state
var pi telegraf.StatefulPlugin = plugin
require.NoError(t, pi.SetState([]byte(state)))
require.Len(t, plugin.Cache, 1)
// Process expected metrics and compare with resulting metrics
actual := plugin.Apply(input...)
testutil.RequireMetricsEqual(t, expected, actual)
// Check getting the persisted state
// Because the cache is a map, the order of metrics in the state is not
// guaranteed, so check the string contents regardless of the order.
actualState, ok := pi.GetState().([]byte)
require.True(t, ok, "state is not a bytes array")
var expectedLen int
for _, m := range expectedState {
require.Contains(t, string(actualState), m)
expectedLen += len(m)
}
require.Len(t, actualState, expectedLen)
}

View file

@ -0,0 +1,4 @@
# Filter metrics with repeating field values
[[processors.dedup]]
## Maximum time to suppress output
dedup_interval = "600s"

View file

@ -0,0 +1,65 @@
# Defaults Processor Plugin
The _Defaults_ processor allows you to ensure certain fields will always exist
with a specified default value on your metric(s).
There are three cases where this processor will insert a configured default
field.
1. The field is nil on the incoming metric.
1. The field is not nil, but its value is an empty string.
1. The field is not nil, but its value is a string of one or more spaces.
Telegraf minimum version: Telegraf 1.15.0
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used to
modify metrics, tags, and fields or create aliases and configure ordering, etc.
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
## Configuration
```toml @sample.conf
## Set default fields on your metric(s) when they are nil or empty
[[processors.defaults]]
## Ensures a set of fields always exists on your metric(s) with their
## respective default value.
## For any given field pair (key = default), if it's not set, a field
## is set on the metric with the specified default.
##
## A field is considered not set if it is nil on the incoming metric;
## or it is not nil but its value is an empty string or is a string
## of one or more spaces.
## <target-field> = <value>
[processors.defaults.fields]
field_1 = "bar"
time_idle = 0
is_error = true
```
## Example
Ensure a _status\_code_ field with the value _N/A_ is inserted when the
incoming metric does not already set one:
```toml
[[processors.defaults]]
[processors.defaults.fields]
status_code = "N/A"
```
```diff
- lb,http_method=GET cache_status=HIT,latency=230
+ lb,http_method=GET cache_status=HIT,latency=230,status_code="N/A"
```
Ensure an empty string gets replaced by a default:
```diff
- lb,http_method=GET cache_status=HIT,latency=230,status_code=""
+ lb,http_method=GET cache_status=HIT,latency=230,status_code="N/A"
```
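A string consisting only of spaces is treated the same way:
```diff
- lb,http_method=GET cache_status=HIT,latency=230,status_code="   "
+ lb,http_method=GET cache_status=HIT,latency=230,status_code="N/A"
```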

View file

@ -0,0 +1,56 @@
//go:generate ../../../tools/readme_config_includer/generator
package defaults
import (
_ "embed"
"strings"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/processors"
)
//go:embed sample.conf
var sampleConfig string
// Defaults is a processor that ensures certain fields always exist on
// metrics, inserting a configured default value where needed.
type Defaults struct {
DefaultFieldsSets map[string]interface{} `toml:"fields"`
}
func (*Defaults) SampleConfig() string {
return sampleConfig
}
// Apply contains the main implementation of this processor.
// For each metric in 'inputMetrics', it goes over each default pair.
// If the field in the pair does not exist on the metric, the associated default is added.
// If the field exists but its value is an empty string or consists only of
// spaces, it is replaced by the associated default.
func (def *Defaults) Apply(inputMetrics ...telegraf.Metric) []telegraf.Metric {
for _, metric := range inputMetrics {
for defField, defValue := range def.DefaultFieldsSets {
if maybeCurrent, isSet := metric.GetField(defField); !isSet {
metric.AddField(defField, defValue)
} else if trimmed, isStr := maybeTrimmedString(maybeCurrent); isStr && trimmed == "" {
metric.RemoveField(defField)
metric.AddField(defField, defValue)
}
}
}
return inputMetrics
}
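// maybeTrimmedString returns the value trimmed of surrounding whitespace and
// true if v is a string, otherwise an empty string and false.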
func maybeTrimmedString(v interface{}) (string, bool) {
if value, ok := v.(string); ok {
return strings.TrimSpace(value), true
}
return "", false
}
func init() {
processors.Add("defaults", func() telegraf.Processor {
return &Defaults{}
})
}

View file

@ -0,0 +1,200 @@
package defaults
import (
"sync"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/testutil"
)
func TestDefaults(t *testing.T) {
scenarios := []struct {
name string
defaults *Defaults
input telegraf.Metric
expected []telegraf.Metric
}{
{
name: "Test that no values are changed since they are not nil or empty",
defaults: &Defaults{
DefaultFieldsSets: map[string]interface{}{
"usage": 30,
"wind_feel": "very chill",
"is_dead": true,
},
},
input: testutil.MustMetric(
"CPU metrics",
map[string]string{},
map[string]interface{}{
"usage": 45,
"wind_feel": "a dragon's breath",
"is_dead": false,
},
time.Unix(0, 0),
),
expected: []telegraf.Metric{
testutil.MustMetric(
"CPU metrics",
map[string]string{},
map[string]interface{}{
"usage": 45,
"wind_feel": "a dragon's breath",
"is_dead": false,
},
time.Unix(0, 0),
),
},
},
{
name: "Tests that the missing fields are set on the metric",
defaults: &Defaults{
DefaultFieldsSets: map[string]interface{}{
"max_clock_gz": 6,
"wind_feel": "Unknown",
"boost_enabled": false,
"variance": 1.2,
},
},
input: testutil.MustMetric(
"CPU metrics",
map[string]string{},
map[string]interface{}{
"usage": 45,
"temperature": 64,
},
time.Unix(0, 0),
),
expected: []telegraf.Metric{
testutil.MustMetric(
"CPU metrics",
map[string]string{},
map[string]interface{}{
"usage": 45,
"temperature": 64,
"max_clock_gz": 6,
"wind_feel": "Unknown",
"boost_enabled": false,
"variance": 1.2,
},
time.Unix(0, 0),
),
},
},
{
name: "Tests that set but empty fields are replaced by specified defaults",
defaults: &Defaults{
DefaultFieldsSets: map[string]interface{}{
"max_clock_gz": 6,
"wind_feel": "Unknown",
"fan_loudness": "Inaudible",
"boost_enabled": false,
},
},
input: testutil.MustMetric(
"CPU metrics",
map[string]string{},
map[string]interface{}{
"max_clock_gz": "",
"wind_feel": " ",
"fan_loudness": " ",
},
time.Unix(0, 0),
),
expected: []telegraf.Metric{
testutil.MustMetric(
"CPU metrics",
map[string]string{},
map[string]interface{}{
"max_clock_gz": 6,
"wind_feel": "Unknown",
"fan_loudness": "Inaudible",
"boost_enabled": false,
},
time.Unix(0, 0),
),
},
},
}
for _, scenario := range scenarios {
t.Run(scenario.name, func(t *testing.T) {
defaults := scenario.defaults
resultMetrics := defaults.Apply(scenario.input)
require.Len(t, resultMetrics, 1)
testutil.RequireMetricsEqual(t, scenario.expected, resultMetrics)
})
}
}
func TestTracking(t *testing.T) {
inputRaw := []telegraf.Metric{
metric.New("foo", map[string]string{}, map[string]interface{}{"value": 42, "topic": "telegraf"}, time.Unix(0, 0)),
metric.New("bar", map[string]string{}, map[string]interface{}{"hours": 23}, time.Unix(0, 0)),
metric.New("baz", map[string]string{}, map[string]interface{}{"status": "fixed"}, time.Unix(0, 0)),
}
var mu sync.Mutex
delivered := make([]telegraf.DeliveryInfo, 0, len(inputRaw))
notify := func(di telegraf.DeliveryInfo) {
mu.Lock()
defer mu.Unlock()
delivered = append(delivered, di)
}
input := make([]telegraf.Metric, 0, len(inputRaw))
for _, m := range inputRaw {
tm, _ := metric.WithTracking(m, notify)
input = append(input, tm)
}
expected := []telegraf.Metric{
metric.New(
"foo",
map[string]string{},
map[string]interface{}{"value": 42, "status": "unknown", "topic": "telegraf"},
time.Unix(0, 0),
),
metric.New(
"bar",
map[string]string{},
map[string]interface{}{"value": 6, "status": "unknown", "hours": 23},
time.Unix(0, 0),
),
metric.New(
"baz",
map[string]string{},
map[string]interface{}{"value": 6, "status": "fixed"},
time.Unix(0, 0),
),
}
plugin := &Defaults{
DefaultFieldsSets: map[string]interface{}{
"value": 6,
"status": "unknown",
},
}
// Process expected metrics and compare with resulting metrics
actual := plugin.Apply(input...)
testutil.RequireMetricsEqual(t, expected, actual)
// Simulate output acknowledging delivery
for _, m := range actual {
m.Accept()
}
// Check delivery
require.Eventuallyf(t, func() bool {
mu.Lock()
defer mu.Unlock()
return len(input) == len(delivered)
}, time.Second, 100*time.Millisecond, "%d delivered but %d expected", len(delivered), len(expected))
}

View file

@ -0,0 +1,15 @@
## Set default fields on your metric(s) when they are nil or empty
[[processors.defaults]]
## Ensures a set of fields always exists on your metric(s) with their
## respective default value.
## For any given field pair (key = default), if it's not set, a field
## is set on the metric with the specified default.
##
## A field is considered not set if it is nil on the incoming metric;
## or it is not nil but its value is an empty string or is a string
## of one or more spaces.
## <target-field> = <value>
[processors.defaults.fields]
field_1 = "bar"
time_idle = 0
is_error = true

View file

@ -0,0 +1,6 @@
package processors
import "github.com/influxdata/telegraf"
// Deprecations lists the deprecated plugins
var Deprecations = make(map[string]telegraf.DeprecationInfo)

View file

@ -0,0 +1,61 @@
# Enum Processor Plugin
The Enum Processor allows the configuration of value mappings for metric tags
or fields. The main use case for this is to rewrite status codes such as _red_,
_amber_ and _green_ with numeric values such as 0, 1, 2. The plugin supports
string, int, float64 and bool types for the field values. Multiple tags or
fields can be configured with separate value mappings for each. Default mapping
values can be configured to be used for all values not contained in the
value_mappings. The processor supports explicit configuration of a destination
tag or field. By default, the source tag or field is overwritten.
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used to
modify metrics, tags, and fields or create aliases and configure ordering, etc.
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
## Configuration
```toml @sample.conf
# Map enum values according to the given table.
[[processors.enum]]
[[processors.enum.mapping]]
## Name of the field to map. Globs accepted.
field = "status"
## Name of the tag to map. Globs accepted.
# tag = "status"
## Destination tag or field to be used for the mapped value. By default the
## source tag or field is used, overwriting the original value.
dest = "status_code"
## Default value to be used for all values not contained in the mapping
## table. When unset and no match is found, the original field will remain
## unmodified and the destination tag or field will not be created.
# default = 0
## Table of mappings
[processors.enum.mapping.value_mappings]
green = 1
amber = 2
red = 3
```
## Example
```diff
- xyzzy status="green" 1502489900000000000
+ xyzzy status="green",status_code=1i 1502489900000000000
```
With unknown value and no default set:
```diff
- xyzzy status="black" 1502489900000000000
+ xyzzy status="black" 1502489900000000000
```
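With the same unknown value but `default = 0` configured, the default is
written to the destination instead:
```diff
- xyzzy status="black" 1502489900000000000
+ xyzzy status="black",status_code=0i 1502489900000000000
```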

View file

@ -0,0 +1,165 @@
//go:generate ../../../tools/readme_config_includer/generator
package enum
import (
_ "embed"
"fmt"
"strconv"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/filter"
"github.com/influxdata/telegraf/plugins/processors"
)
//go:embed sample.conf
var sampleConfig string
type EnumMapper struct {
Mappings []Mapping `toml:"mapping"`
FieldFilters map[string]filter.Filter
TagFilters map[string]filter.Filter
}
type Mapping struct {
Tag string
Field string
Dest string
Default interface{}
ValueMappings map[string]interface{}
}
func (*EnumMapper) SampleConfig() string {
return sampleConfig
}
func (mapper *EnumMapper) Init() error {
mapper.FieldFilters = make(map[string]filter.Filter)
mapper.TagFilters = make(map[string]filter.Filter)
for _, mapping := range mapper.Mappings {
if mapping.Field != "" {
fieldFilter, err := filter.NewIncludeExcludeFilter([]string{mapping.Field}, nil)
if err != nil {
return fmt.Errorf("failed to create new field filter: %w", err)
}
mapper.FieldFilters[mapping.Field] = fieldFilter
}
if mapping.Tag != "" {
tagFilter, err := filter.NewIncludeExcludeFilter([]string{mapping.Tag}, nil)
if err != nil {
return fmt.Errorf("failed to create new tag filter: %w", err)
}
mapper.TagFilters[mapping.Tag] = tagFilter
}
}
return nil
}
func (mapper *EnumMapper) Apply(in ...telegraf.Metric) []telegraf.Metric {
for i := 0; i < len(in); i++ {
in[i] = mapper.applyMappings(in[i])
}
return in
}
func (mapper *EnumMapper) applyMappings(metric telegraf.Metric) telegraf.Metric {
newFields := make(map[string]interface{})
newTags := make(map[string]string)
for _, mapping := range mapper.Mappings {
if mapping.Field != "" {
mapper.fieldMapping(metric, mapping, newFields)
}
if mapping.Tag != "" {
mapper.tagMapping(metric, mapping, newTags)
}
}
for k, v := range newFields {
writeField(metric, k, v)
}
for k, v := range newTags {
writeTag(metric, k, v)
}
return metric
}
func (mapper *EnumMapper) fieldMapping(metric telegraf.Metric, mapping Mapping, newFields map[string]interface{}) {
fields := metric.FieldList()
for _, f := range fields {
if mapper.FieldFilters[mapping.Field].Match(f.Key) {
if adjustedValue, isString := adjustValue(f.Value).(string); isString {
if mappedValue, isMappedValuePresent := mapping.mapValue(adjustedValue); isMappedValuePresent {
newFields[mapping.getDestination(f.Key)] = mappedValue
}
}
}
}
}
func (mapper *EnumMapper) tagMapping(metric telegraf.Metric, mapping Mapping, newTags map[string]string) {
tags := metric.TagList()
for _, t := range tags {
if mapper.TagFilters[mapping.Tag].Match(t.Key) {
if mappedValue, isMappedValuePresent := mapping.mapValue(t.Value); isMappedValuePresent {
switch val := mappedValue.(type) {
case string:
newTags[mapping.getDestination(t.Key)] = val
default:
newTags[mapping.getDestination(t.Key)] = fmt.Sprintf("%v", val)
}
}
}
}
}
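// adjustValue converts boolean and numeric values to their string
// representation so they can be looked up in the string-keyed mapping table.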
func adjustValue(in interface{}) interface{} {
switch val := in.(type) {
case bool:
return strconv.FormatBool(val)
case int64:
return strconv.FormatInt(val, 10)
case float64:
return strconv.FormatFloat(val, 'f', -1, 64)
case uint64:
return strconv.FormatUint(val, 10)
default:
return in
}
}
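// mapValue looks up the original value in the mapping table and falls back to
// the configured default; the boolean reports whether a mapping or default
// applied.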
func (mapping *Mapping) mapValue(original string) (interface{}, bool) {
if mapped, found := mapping.ValueMappings[original]; found {
return mapped, true
}
if mapping.Default != nil {
return mapping.Default, true
}
return original, false
}
func (mapping *Mapping) getDestination(defaultDest string) string {
if mapping.Dest != "" {
return mapping.Dest
}
return defaultDest
}
func writeField(metric telegraf.Metric, name string, value interface{}) {
metric.RemoveField(name)
metric.AddField(name, value)
}
func writeTag(metric telegraf.Metric, name, value string) {
metric.RemoveTag(name)
metric.AddTag(name, value)
}
func init() {
processors.Add("enum", func() telegraf.Processor {
return &EnumMapper{}
})
}

View file

@ -0,0 +1,219 @@
package enum
import (
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
)
func createTestMetric() telegraf.Metric {
m := metric.New("m1",
map[string]string{
"tag": "tag_value",
"duplicate_tag": "tag_value",
},
map[string]interface{}{
"string_value": "test",
"duplicate_string_value": "test",
"int_value": 200,
"uint_value": uint(500),
"float_value": float64(3.14),
"true_value": true,
},
time.Now(),
)
return m
}
func calculateProcessedValues(mapper EnumMapper, m telegraf.Metric) map[string]interface{} {
processed := mapper.Apply(m)
return processed[0].Fields()
}
func calculateProcessedTags(mapper EnumMapper, m telegraf.Metric) map[string]string {
processed := mapper.Apply(m)
return processed[0].Tags()
}
func assertFieldValue(t *testing.T, expected interface{}, field string, fields map[string]interface{}) {
value, present := fields[field]
require.True(t, present, "value of field '"+field+"' was not present")
require.EqualValues(t, expected, value)
}
func assertTagValue(t *testing.T, expected interface{}, tag string, tags map[string]string) {
value, present := tags[tag]
require.True(t, present, "value of tag '"+tag+"' was not present")
require.EqualValues(t, expected, value)
}
func TestRetainsMetric(t *testing.T) {
mapper := EnumMapper{}
err := mapper.Init()
require.NoError(t, err)
source := createTestMetric()
target := mapper.Apply(source)[0]
fields := target.Fields()
assertFieldValue(t, "test", "string_value", fields)
assertFieldValue(t, 200, "int_value", fields)
assertFieldValue(t, 500, "uint_value", fields)
assertFieldValue(t, float64(3.14), "float_value", fields)
assertFieldValue(t, true, "true_value", fields)
require.Equal(t, "m1", target.Name())
require.Equal(t, source.Tags(), target.Tags())
require.Equal(t, source.Time(), target.Time())
}
func TestMapsSingleStringValueTag(t *testing.T) {
mapper := EnumMapper{Mappings: []Mapping{{Tag: "tag", ValueMappings: map[string]interface{}{"tag_value": "valuable"}}}}
err := mapper.Init()
require.NoError(t, err)
tags := calculateProcessedTags(mapper, createTestMetric())
assertTagValue(t, "valuable", "tag", tags)
}
func TestMappings(t *testing.T) {
mappings := []map[string][]interface{}{
{
"field_name": []interface{}{"string_value"},
"target_values": []interface{}{"test", "test", "test", "not_test", "50", "true"},
"mapped_values": []interface{}{"test_1", 5, true, "test_1", 10, false},
"expected_values": []interface{}{"test_1", 5, true, "test", "test", "test"},
},
{
"field_name": []interface{}{"true_value"},
"target_value": []interface{}{"true", "true", "true", "false", "test", "5"},
"mapped_value": []interface{}{false, 1, "false", false, false, false},
"expected_value": []interface{}{false, 1, "false", true, true, true},
},
{
"field_name": []interface{}{"int_value"},
"target_value": []interface{}{"200", "200", "200", "200", "test", "5"},
"mapped_value": []interface{}{"http_ok", true, 1, float64(200.001), false, false},
"expected_value": []interface{}{"http_ok", true, 1, float64(200.001), 200, 200},
},
{
"field_name": []interface{}{"uint_value"},
"target_value": []interface{}{"500", "500", "500", "test", "false", "5"},
"mapped_value": []interface{}{"internal_error", 1, false, false, false, false},
"expected_value": []interface{}{"internal_error", 1, false, 500, 500, 500},
},
{
"field_name": []interface{}{"float_value"},
"target_value": []interface{}{"3.14", "3.14", "3.14", "3.14", "not_float", "5"},
"mapped_value": []interface{}{"pi", 1, false, float64(100.2), float64(3.14), "pi"},
"expected_value": []interface{}{"pi", 1, false, float64(100.2), float64(3.14), float64(3.14)},
},
}
for _, mapping := range mappings {
fieldName := mapping["field_name"][0].(string)
for index := range mapping["target_value"] {
mapper := EnumMapper{
Mappings: []Mapping{
{Field: fieldName, ValueMappings: map[string]interface{}{mapping["target_value"][index].(string): mapping["mapped_value"][index]}},
},
}
err := mapper.Init()
require.NoError(t, err)
fields := calculateProcessedValues(mapper, createTestMetric())
assertFieldValue(t, mapping["expected_value"][index], fieldName, fields)
}
}
}
func TestMapsToDefaultValueOnUnknownSourceValue(t *testing.T) {
mapper := EnumMapper{Mappings: []Mapping{{Field: "string_value", Default: int64(42), ValueMappings: map[string]interface{}{"other": int64(1)}}}}
err := mapper.Init()
require.NoError(t, err)
fields := calculateProcessedValues(mapper, createTestMetric())
assertFieldValue(t, 42, "string_value", fields)
}
func TestDoNotMapToDefaultValueKnownSourceValue(t *testing.T) {
mapper := EnumMapper{Mappings: []Mapping{{Field: "string_value", Default: int64(42), ValueMappings: map[string]interface{}{"test": int64(1)}}}}
err := mapper.Init()
require.NoError(t, err)
fields := calculateProcessedValues(mapper, createTestMetric())
assertFieldValue(t, 1, "string_value", fields)
}
func TestNoMappingWithoutDefaultOrDefinedMappingValue(t *testing.T) {
mapper := EnumMapper{Mappings: []Mapping{{Field: "string_value", ValueMappings: map[string]interface{}{"other": int64(1)}}}}
err := mapper.Init()
require.NoError(t, err)
fields := calculateProcessedValues(mapper, createTestMetric())
assertFieldValue(t, "test", "string_value", fields)
}
func TestWritesToDestination(t *testing.T) {
mapper := EnumMapper{Mappings: []Mapping{{Field: "string_value", Dest: "string_code", ValueMappings: map[string]interface{}{"test": int64(1)}}}}
err := mapper.Init()
require.NoError(t, err)
fields := calculateProcessedValues(mapper, createTestMetric())
assertFieldValue(t, "test", "string_value", fields)
assertFieldValue(t, 1, "string_code", fields)
}
func TestDoNotWriteToDestinationWithoutDefaultOrDefinedMapping(t *testing.T) {
field := "string_code"
mapper := EnumMapper{Mappings: []Mapping{{Field: "string_value", Dest: field, ValueMappings: map[string]interface{}{"other": int64(1)}}}}
err := mapper.Init()
require.NoError(t, err)
fields := calculateProcessedValues(mapper, createTestMetric())
assertFieldValue(t, "test", "string_value", fields)
_, present := fields[field]
require.False(t, present, "value of field '"+field+"' was present")
}
func TestFieldGlobMatching(t *testing.T) {
mapper := EnumMapper{Mappings: []Mapping{{Field: "*", ValueMappings: map[string]interface{}{"test": "glob"}}}}
err := mapper.Init()
require.NoError(t, err)
fields := calculateProcessedValues(mapper, createTestMetric())
assertFieldValue(t, "glob", "string_value", fields)
assertFieldValue(t, "glob", "duplicate_string_value", fields)
}
func TestTagGlobMatching(t *testing.T) {
mapper := EnumMapper{Mappings: []Mapping{{Tag: "*", ValueMappings: map[string]interface{}{"tag_value": "glob"}}}}
err := mapper.Init()
require.NoError(t, err)
tags := calculateProcessedTags(mapper, createTestMetric())
assertTagValue(t, "glob", "tag", tags)
}
func TestTracking(t *testing.T) {
m := createTestMetric()
var delivered bool
notify := func(telegraf.DeliveryInfo) {
delivered = true
}
m, _ = metric.WithTracking(m, notify)
mapper := EnumMapper{Mappings: []Mapping{{Tag: "*", ValueMappings: map[string]interface{}{"tag_value": "glob"}}}}
err := mapper.Init()
require.NoError(t, err)
actual := mapper.Apply(m)[0]
assertTagValue(t, "glob", "tag", actual.Tags())
actual.Accept()
require.Eventually(t, func() bool {
return delivered
}, time.Second, 100*time.Millisecond, "no metrics delivered")
}

View file

@ -0,0 +1,23 @@
# Map enum values according to the given table.
[[processors.enum]]
[[processors.enum.mapping]]
## Name of the field to map. Globs accepted.
field = "status"
## Name of the tag to map. Globs accepted.
# tag = "status"
## Destination tag or field to be used for the mapped value. By default the
## source tag or field is used, overwriting the original value.
dest = "status_code"
## Default value to be used for all values not contained in the mapping
## table. When unset and no match is found, the original field will remain
## unmodified and the destination tag or field will not be created.
# default = 0
## Table of mappings
[processors.enum.mapping.value_mappings]
green = 1
amber = 2
red = 3

View file

@ -0,0 +1,139 @@
# Execd Processor Plugin
The `execd` processor plugin runs an external program as a separate process,
pipes metrics into the process's STDIN, and reads processed metrics from its
STDOUT. The program must accept influx line protocol on standard input (STDIN)
and output metrics in influx line protocol to standard output (STDOUT).
Program output on standard error is mirrored to the Telegraf log.
Telegraf minimum version: Telegraf 1.15.0
## Caveats
- Metrics with tracking will be considered "delivered" as soon as they are passed
to the external process. There is currently no way to match up which metric
coming out of the execd process relates to which metric going in (keep in mind
that processors can add and drop metrics, and that this is all done
asynchronously).
- It's not currently possible to use a data_format other than "influx", because
the format must be serialize-parse symmetrical and must not lose any
critical type data.
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used to
modify metrics, tags, and fields or create aliases and configure ordering, etc.
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
## Configuration
```toml @sample.conf
# Run executable as long-running processor plugin
[[processors.execd]]
## One program to run as daemon.
## NOTE: the process and each argument should each be their own string
## e.g.: command = ["/path/to/your_program", "arg1", "arg2"]
command = ["cat"]
## Environment variables
## Array of "key=value" pairs to pass as environment variables
## e.g. "KEY=value", "USERNAME=John Doe",
## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs"
# environment = []
## Delay before the process is restarted after an unexpected termination
# restart_delay = "10s"
## Serialization format for communicating with the executed program
## Please note that the corresponding data-format must exist both in
## parsers and serializers
# data_format = "influx"
```
## Example
### Go daemon example
This Go daemon reads each metric from stdin, multiplies the "count" field by 2,
and writes the metric back out.
```go
package main
import (
"fmt"
"os"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/plugins/parsers/influx"
serializers_influx "github.com/influxdata/telegraf/plugins/serializers/influx"
)
func main() {
parser := influx.NewStreamParser(os.Stdin)
serializer := serializers_influx.Serializer{}
if err := serializer.Init(); err != nil {
fmt.Fprintf(os.Stderr, "serializer init failed: %v\n", err)
os.Exit(1)
}
for {
metric, err := parser.Next()
if err != nil {
if err == influx.EOF {
return // stream ended
}
if parseErr, isParseError := err.(*influx.ParseError); isParseError {
fmt.Fprintf(os.Stderr, "parse ERR %v\n", parseErr)
os.Exit(1)
}
fmt.Fprintf(os.Stderr, "ERR %v\n", err)
os.Exit(1)
}
c, found := metric.GetField("count")
if !found {
fmt.Fprintf(os.Stderr, "metric has no count field\n")
os.Exit(1)
}
switch t := c.(type) {
case float64:
t *= 2
metric.AddField("count", t)
case int64:
t *= 2
metric.AddField("count", t)
default:
fmt.Fprintf(os.Stderr, "count is not an unknown type, it's a %T\n", c)
os.Exit(1)
}
b, err := serializer.Serialize(metric)
if err != nil {
fmt.Fprintf(os.Stderr, "ERR %v\n", err)
os.Exit(1)
}
fmt.Fprint(os.Stdout, string(b))
}
}
```
To run it, build the binary with Go, e.g. `go build -o multiplier.exe main.go`,
then point the processor at it:
```toml
[[processors.execd]]
command = ["multiplier.exe"]
```
### Ruby daemon
- See [Ruby daemon](./examples/multiplier_line_protocol/multiplier_line_protocol.rb)
```toml
[[processors.execd]]
command = ["ruby", "plugins/processors/execd/examples/multiplier_line_protocol/multiplier_line_protocol.rb"]
```

View file

@ -0,0 +1,14 @@
[agent]
interval = "10s"
[[inputs.execd]]
command = ["ruby", "plugins/inputs/execd/examples/count.rb"]
[[processors.execd]]
command = ["ruby", "plugins/processors/execd/examples/multiplier_line_protocol/multiplier_line_protocol.rb"]
[[outputs.file]]
files = ["stdout"]
data_format = "influx"

View file

@ -0,0 +1,27 @@
#!/usr/bin/env ruby
loop do
# example input: "counter_ruby count=0 1586302128978187000"
line = STDIN.readline.chomp
# parse out influx line protocol sections with a really simple hand-rolled parser that doesn't support escaping.
# for a full line parser in ruby, check out something like the influxdb-lineprotocol-parser gem.
parts = line.split(" ")
case parts.size
when 3
measurement, fields, timestamp = parts
when 4
measurement, tags, fields, timestamp = parts
else
STDERR.puts "Unable to parse line protocol"
exit 1
end
fields = fields.split(",").map{|t|
k,v = t.split("=")
if k == "count"
v = v.to_i * 2 # multiply the count field by two
end
"#{k}=#{v}"
}.join(",")
puts [measurement, tags, fields, timestamp].select{|s| s && s.size != 0 }.join(" ")
STDOUT.flush
end

View file

@ -0,0 +1,184 @@
//go:generate ../../../tools/readme_config_includer/generator
package execd
import (
"bufio"
_ "embed"
"errors"
"fmt"
"io"
"strings"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/internal/process"
"github.com/influxdata/telegraf/models"
"github.com/influxdata/telegraf/plugins/parsers/influx"
"github.com/influxdata/telegraf/plugins/processors"
)
//go:embed sample.conf
var sampleConfig string
type Execd struct {
Command []string `toml:"command"`
Environment []string `toml:"environment"`
RestartDelay config.Duration `toml:"restart_delay"`
Log telegraf.Logger
parser telegraf.Parser
serializer telegraf.Serializer
acc telegraf.Accumulator
process *process.Process
}
func (e *Execd) SetParser(p telegraf.Parser) {
e.parser = p
}
func (e *Execd) SetSerializer(s telegraf.Serializer) {
e.serializer = s
}
func (*Execd) SampleConfig() string {
return sampleConfig
}
func (e *Execd) Start(acc telegraf.Accumulator) error {
e.acc = acc
var err error
e.process, err = process.New(e.Command, e.Environment)
if err != nil {
return fmt.Errorf("error creating new process: %w", err)
}
e.process.Log = e.Log
e.process.RestartDelay = time.Duration(e.RestartDelay)
e.process.ReadStdoutFn = e.cmdReadOut
e.process.ReadStderrFn = e.cmdReadErr
if err = e.process.Start(); err != nil {
// if there was only one argument, and it contained spaces, warn the user
// that they may have configured it wrong.
if len(e.Command) == 1 && strings.Contains(e.Command[0], " ") {
e.Log.Warn("The processors.execd Command contained spaces but no arguments. " +
"This setting expects the program and arguments as an array of strings, " +
"not as a space-delimited string. See the plugin readme for an example.")
}
return fmt.Errorf("failed to start process %s: %w", e.Command, err)
}
return nil
}
func (e *Execd) Add(m telegraf.Metric, _ telegraf.Accumulator) error {
b, err := e.serializer.Serialize(m)
if err != nil {
return fmt.Errorf("metric serializing error: %w", err)
}
_, err = e.process.Stdin.Write(b)
if err != nil {
return fmt.Errorf("error writing to process stdin: %w", err)
}
// We cannot maintain tracking metrics at the moment because input/output
// is done asynchronously and we don't have any metric metadata to tie the
// output metric back to the original input metric.
m.Accept()
return nil
}
func (e *Execd) Stop() {
e.process.Stop()
}
func (e *Execd) cmdReadOut(out io.Reader) {
// Prefer using the StreamParser when parsing influx format.
var parser telegraf.Parser
if rp, ok := e.parser.(*models.RunningParser); ok {
parser = rp.Parser
} else {
parser = e.parser
}
if _, isInfluxParser := parser.(*influx.Parser); isInfluxParser {
e.cmdReadOutStream(out)
return
}
scanner := bufio.NewScanner(out)
scanBuf := make([]byte, 4096)
scanner.Buffer(scanBuf, 262144)
for scanner.Scan() {
metrics, err := e.parser.Parse(scanner.Bytes())
if err != nil {
e.Log.Errorf("Parse error: %s", err)
}
for _, metric := range metrics {
e.acc.AddMetric(metric)
}
}
if err := scanner.Err(); err != nil {
e.Log.Errorf("Error reading stdout: %s", err)
}
}
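// cmdReadOutStream reads stdout using the influx StreamParser, which handles
// metrics containing newline characters as well as lines longer than the
// scanner buffer limit used in cmdReadOut.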
func (e *Execd) cmdReadOutStream(out io.Reader) {
parser := influx.NewStreamParser(out)
for {
metric, err := parser.Next()
if err != nil {
// Stop parsing when we've reached the end.
if errors.Is(err, influx.EOF) {
break
}
var parseErr *influx.ParseError
if errors.As(err, &parseErr) {
// Continue past parse errors.
e.acc.AddError(parseErr)
continue
}
// Stop reading on any non-recoverable error.
e.acc.AddError(err)
return
}
e.acc.AddMetric(metric)
}
}
func (e *Execd) cmdReadErr(out io.Reader) {
scanner := bufio.NewScanner(out)
for scanner.Scan() {
e.Log.Errorf("stderr: %q", scanner.Text())
}
if err := scanner.Err(); err != nil {
e.Log.Errorf("Error reading stderr: %s", err)
}
}
func (e *Execd) Init() error {
if len(e.Command) == 0 {
return errors.New("no command specified")
}
return nil
}
func init() {
processors.AddStreaming("execd", func() telegraf.StreamingProcessor {
return &Execd{
RestartDelay: config.Duration(10 * time.Second),
}
})
}

View file

@ -0,0 +1,492 @@
package execd
import (
"errors"
"flag"
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/metric"
_ "github.com/influxdata/telegraf/plugins/parsers/all"
"github.com/influxdata/telegraf/plugins/parsers/influx"
"github.com/influxdata/telegraf/plugins/processors"
_ "github.com/influxdata/telegraf/plugins/serializers/all"
serializers_influx "github.com/influxdata/telegraf/plugins/serializers/influx"
"github.com/influxdata/telegraf/testutil"
)
func TestExternalProcessorWorks(t *testing.T) {
// Determine name of the test executable for mocking an external program
exe, err := os.Executable()
require.NoError(t, err)
// Setup the plugin
plugin := &Execd{
Command: []string{
exe,
"-case", "multiply",
"-field", "count",
},
Environment: []string{"PLUGINS_PROCESSORS_EXECD_MODE=application"},
RestartDelay: config.Duration(5 * time.Second),
Log: testutil.Logger{},
}
// Setup the parser and serializer in the processor
parser := &influx.Parser{}
require.NoError(t, parser.Init())
plugin.SetParser(parser)
serializer := &serializers_influx.Serializer{}
require.NoError(t, serializer.Init())
plugin.SetSerializer(serializer)
// Setup the input and expected output metrics
now := time.Now()
var input []telegraf.Metric
var expected []telegraf.Metric
for i := 0; i < 10; i++ {
m := metric.New(
"test",
map[string]string{"city": "Toronto"},
map[string]interface{}{"population": 6000000, "count": 1},
now.Add(time.Duration(i)),
)
input = append(input, m)
e := m.Copy()
e.AddField("count", 2)
expected = append(expected, e)
}
// Perform the test and check the result
var acc testutil.Accumulator
require.NoError(t, plugin.Start(&acc))
defer plugin.Stop()
for _, m := range input {
require.NoError(t, plugin.Add(m, &acc))
}
require.Eventually(t, func() bool {
return acc.NMetrics() >= uint64(len(expected))
}, 3*time.Second, 100*time.Millisecond)
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics())
}
func TestParseLinesWithNewLines(t *testing.T) {
// Determine name of the test executable for mocking an external program
exe, err := os.Executable()
require.NoError(t, err)
// Setup the plugin
plugin := &Execd{
Command: []string{
exe,
"-case", "multiply",
"-field", "count",
},
Environment: []string{"PLUGINS_PROCESSORS_EXECD_MODE=application"},
RestartDelay: config.Duration(5 * time.Second),
Log: testutil.Logger{},
}
// Setup the parser and serializer in the processor
parser := &influx.Parser{}
require.NoError(t, parser.Init())
plugin.SetParser(parser)
serializer := &serializers_influx.Serializer{}
require.NoError(t, serializer.Init())
plugin.SetSerializer(serializer)
// Setup the input and expected output metrics
now := time.Now()
input := metric.New(
"test",
map[string]string{
"author": "Mr. Gopher",
},
map[string]interface{}{
"phrase": "Gophers are amazing creatures.\nAbsolutely amazing.",
"count": 3,
},
now,
)
expected := []telegraf.Metric{
metric.New(
"test",
map[string]string{"author": "Mr. Gopher"},
map[string]interface{}{
"phrase": "Gophers are amazing creatures.\nAbsolutely amazing.",
"count": 6,
},
now,
),
}
// Perform the test and check the result
var acc testutil.Accumulator
require.NoError(t, plugin.Start(&acc))
defer plugin.Stop()
require.NoError(t, plugin.Add(input, &acc))
require.Eventually(t, func() bool {
return acc.NMetrics() >= uint64(len(expected))
}, 3*time.Second, 100*time.Millisecond)
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics())
}
func TestLongLinesForLineProtocol(t *testing.T) {
// Determine name of the test executable for mocking an external program
exe, err := os.Executable()
require.NoError(t, err)
// Setup the plugin
plugin := &Execd{
Command: []string{
exe,
"-case", "long",
"-field", "long",
},
Environment: []string{"PLUGINS_PROCESSORS_EXECD_MODE=application"},
RestartDelay: config.Duration(5 * time.Second),
Log: testutil.Logger{},
}
// Setup the parser and serializer in the processor
parser := &influx.Parser{}
require.NoError(t, parser.Init())
plugin.SetParser(parser)
serializer := &serializers_influx.Serializer{}
require.NoError(t, serializer.Init())
plugin.SetSerializer(serializer)
// Setup the input and expected output metrics
now := time.Now()
input := metric.New(
"test",
map[string]string{"author": "Mr. Gopher"},
map[string]interface{}{"count": 3},
now,
)
expected := []telegraf.Metric{
metric.New(
"test",
map[string]string{"author": "Mr. Gopher"},
map[string]interface{}{
"long": strings.Repeat("foobar", 280_000/6),
"count": 3,
},
now,
),
}
// Perform the test and check the result
var acc testutil.Accumulator
require.NoError(t, plugin.Start(&acc))
defer plugin.Stop()
require.NoError(t, plugin.Add(input, &acc))
require.Eventually(t, func() bool {
return acc.NMetrics() >= uint64(len(expected))
}, 3*time.Second, 100*time.Millisecond)
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics())
}
func TestCases(t *testing.T) {
// Get all directories in testcases
folders, err := os.ReadDir("testcases")
require.NoError(t, err)
// Make sure tests contains data
require.NotEmpty(t, folders)
// Register the processor so it can be loaded from the testcase config files
processors.AddStreaming("execd", func() telegraf.StreamingProcessor {
return &Execd{RestartDelay: config.Duration(10 * time.Second)}
})
for _, f := range folders {
// Only handle folders
if !f.IsDir() {
continue
}
fname := f.Name()
t.Run(fname, func(t *testing.T) {
testdataPath := filepath.Join("testcases", fname)
configFilename := filepath.Join(testdataPath, "telegraf.conf")
inputFilename := filepath.Join(testdataPath, "input.influx")
expectedFilename := filepath.Join(testdataPath, "expected.out")
// Get parser to parse input and expected output
parser := &influx.Parser{}
require.NoError(t, parser.Init())
input, err := testutil.ParseMetricsFromFile(inputFilename, parser)
require.NoError(t, err)
expected, err := testutil.ParseMetricsFromFile(expectedFilename, parser)
require.NoError(t, err)
// Configure the plugin
cfg := config.NewConfig()
require.NoError(t, cfg.LoadConfig(configFilename))
require.Len(t, cfg.Processors, 1, "wrong number of outputs")
plugin := cfg.Processors[0].Processor
// Process the metrics
var acc testutil.Accumulator
require.NoError(t, plugin.Start(&acc))
for _, m := range input {
require.NoError(t, plugin.Add(m, &acc))
}
plugin.Stop()
require.Eventually(t, func() bool {
return acc.NMetrics() >= uint64(len(expected))
}, time.Second, 100*time.Millisecond)
// Check the expectations
actual := acc.GetTelegrafMetrics()
testutil.RequireMetricsEqual(t, expected, actual)
})
}
}
func TestTracking(t *testing.T) {
now := time.Now()
// Setup the raw input and expected output data
inputRaw := []telegraf.Metric{
metric.New(
"test",
map[string]string{
"city": "Toronto",
},
map[string]interface{}{
"population": 6000000,
"count": 1,
},
now,
),
metric.New(
"test",
map[string]string{
"city": "Tokio",
},
map[string]interface{}{
"population": 14000000,
"count": 8,
},
now,
),
}
expected := []telegraf.Metric{
metric.New(
"test",
map[string]string{
"city": "Toronto",
},
map[string]interface{}{
"population": 6000000,
"count": 2,
},
now,
),
metric.New(
"test",
map[string]string{
"city": "Tokio",
},
map[string]interface{}{
"population": 14000000,
"count": 16,
},
now,
),
}
// Create a testing notifier
var mu sync.Mutex
delivered := make([]telegraf.DeliveryInfo, 0, len(inputRaw))
notify := func(di telegraf.DeliveryInfo) {
mu.Lock()
defer mu.Unlock()
delivered = append(delivered, di)
}
// Convert raw input to tracking metrics
input := make([]telegraf.Metric, 0, len(inputRaw))
for _, m := range inputRaw {
tm, _ := metric.WithTracking(m, notify)
input = append(input, tm)
}
// Setup the plugin
exe, err := os.Executable()
require.NoError(t, err)
plugin := &Execd{
Command: []string{
exe,
"-case", "multiply",
"-field", "count",
},
Environment: []string{"PLUGINS_PROCESSORS_EXECD_MODE=application"},
RestartDelay: config.Duration(5 * time.Second),
Log: testutil.Logger{},
}
require.NoError(t, plugin.Init())
parser := &influx.Parser{}
require.NoError(t, parser.Init())
plugin.SetParser(parser)
serializer := &serializers_influx.Serializer{}
require.NoError(t, serializer.Init())
plugin.SetSerializer(serializer)
var acc testutil.Accumulator
require.NoError(t, plugin.Start(&acc))
defer plugin.Stop()
// Process expected metrics and compare with resulting metrics
for _, in := range input {
require.NoError(t, plugin.Add(in, &acc))
}
require.Eventually(t, func() bool {
return int(acc.NMetrics()) >= len(expected)
}, 3*time.Second, 100*time.Millisecond)
actual := acc.GetTelegrafMetrics()
testutil.RequireMetricsEqual(t, expected, actual)
// Simulate output acknowledging delivery
for _, m := range actual {
m.Accept()
}
// Check delivery
require.Eventuallyf(t, func() bool {
mu.Lock()
defer mu.Unlock()
return len(input) == len(delivered)
}, time.Second, 100*time.Millisecond, "%d delivered but %d expected", len(delivered), len(expected))
}
func TestMain(m *testing.M) {
var testcase, field string
flag.StringVar(&testcase, "case", "", "test-case to mock [multiply, long]")
flag.StringVar(&field, "field", "count", "name of the field to multiply")
flag.Parse()
if os.Getenv("PLUGINS_PROCESSORS_EXECD_MODE") != "application" || testcase == "" {
os.Exit(m.Run())
}
switch testcase {
case "multiply":
os.Exit(runTestCaseMultiply(field))
case "long":
os.Exit(runTestCaseLong(field))
}
os.Exit(5)
}
func runTestCaseMultiply(field string) int {
parser := influx.NewStreamParser(os.Stdin)
serializer := &serializers_influx.Serializer{}
if err := serializer.Init(); err != nil {
fmt.Fprintf(os.Stderr, "initialization ERR %v\n", err)
return 1
}
for {
m, err := parser.Next()
if err != nil {
if errors.Is(err, influx.EOF) {
return 0
}
var parseErr *influx.ParseError
if errors.As(err, &parseErr) {
fmt.Fprintf(os.Stderr, "parse ERR %v\n", parseErr)
return 1
}
fmt.Fprintf(os.Stderr, "ERR %v\n", err)
return 1
}
c, found := m.GetField(field)
if !found {
fmt.Fprintf(os.Stderr, "metric has no field %q\n", field)
return 1
}
switch t := c.(type) {
case float64:
m.AddField(field, t*2)
case int64:
m.AddField(field, t*2)
default:
fmt.Fprintf(os.Stderr, "%s has an unknown type, it's a %T\n", field, c)
return 1
}
b, err := serializer.Serialize(m)
if err != nil {
fmt.Fprintf(os.Stderr, "ERR %v\n", err)
return 1
}
fmt.Fprint(os.Stdout, string(b))
}
}
func runTestCaseLong(field string) int {
parser := influx.NewStreamParser(os.Stdin)
serializer := &serializers_influx.Serializer{}
if err := serializer.Init(); err != nil {
fmt.Fprintf(os.Stderr, "initialization ERR %v\n", err)
return 1
}
// Setup a field with a lot of characters to exceed the scanner limit
long := strings.Repeat("foobar", 280_000/6)
for {
m, err := parser.Next()
if err != nil {
if errors.Is(err, influx.EOF) {
return 0
}
var parseErr *influx.ParseError
if errors.As(err, &parseErr) {
fmt.Fprintf(os.Stderr, "parse ERR %v\n", parseErr)
return 1
}
fmt.Fprintf(os.Stderr, "ERR %v\n", err)
return 1
}
m.AddField(field, long)
b, err := serializer.Serialize(m)
if err != nil {
fmt.Fprintf(os.Stderr, "ERR %v\n", err)
return 1
}
fmt.Fprint(os.Stdout, string(b))
}
}

View file

@ -0,0 +1,20 @@
# Run executable as long-running processor plugin
[[processors.execd]]
## One program to run as daemon.
## NOTE: the process and each argument should each be their own string
## e.g.: command = ["/path/to/your_program", "arg1", "arg2"]
command = ["cat"]
## Environment variables
## Array of "key=value" pairs to pass as environment variables
## e.g. "KEY=value", "USERNAME=John Doe",
## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs"
# environment = []
## Delay before the process is restarted after an unexpected termination
# restart_delay = "10s"
## Serialization format for communicating with the executed program
## Please note that the corresponding data-format must exist both in
## parsers and serializers
# data_format = "influx"

View file

@ -0,0 +1,5 @@
cpu,cpu=cpu-total,host=Hugin usage_guest=0,usage_guest_nice=0,usage_idle=99.75000000049295,usage_iowait=0,usage_irq=0.1250000000007958,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0.12500000000363798 1678124473000000123
cpu,cpu=cpu-total,host=Munin usage_guest=0,usage_guest_nice=0,usage_idle=99.75000000049295,usage_iowait=0,usage_irq=0.1250000000007958,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0.12500000000363798 1678124473000000456
cpu,cpu=cpu-total,host=Thor usage_guest=0,usage_guest_nice=0,usage_idle=99.75000000049295,usage_iowait=0,usage_irq=0.1250000000007958,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0.12500000000363798 1678124473000000789
disk,device=nvme0n1p4,fstype=ext4,host=Hugin,mode=rw,path=/ free=65652391936i,inodes_free=40445279i,inodes_total=45047808i,inodes_used=4602529i,total=725328994304i,used=622756728832i,used_percent=90.4631722684 1678124473000000111
disk,device=nvme0n1p4,fstype=ext4,host=Munin,mode=rw,path=/ free=65652391936i,inodes_free=40445279i,inodes_total=45047808i,inodes_used=4602529i,total=725328994304i,used=622756728832i,used_percent=90.4631722684 1678124473000000222

View file

@ -0,0 +1,5 @@
cpu,cpu=cpu-total,host=Hugin usage_guest=0,usage_guest_nice=0,usage_idle=99.75000000049295,usage_iowait=0,usage_irq=0.1250000000007958,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0.12500000000363798 1678124473000000123
cpu,cpu=cpu-total,host=Munin usage_guest=0,usage_guest_nice=0,usage_idle=99.75000000049295,usage_iowait=0,usage_irq=0.1250000000007958,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0.12500000000363798 1678124473000000456
cpu,cpu=cpu-total,host=Thor usage_guest=0,usage_guest_nice=0,usage_idle=99.75000000049295,usage_iowait=0,usage_irq=0.1250000000007958,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0.12500000000363798 1678124473000000789
disk,device=nvme0n1p4,fstype=ext4,host=Hugin,mode=rw,path=/ free=65652391936i,inodes_free=40445279i,inodes_total=45047808i,inodes_used=4602529i,total=725328994304i,used=622756728832i,used_percent=90.4631722684 1678124473000000111
disk,device=nvme0n1p4,fstype=ext4,host=Munin,mode=rw,path=/ free=65652391936i,inodes_free=40445279i,inodes_total=45047808i,inodes_used=4602529i,total=725328994304i,used=622756728832i,used_percent=90.4631722684 1678124473000000222

View file

@ -0,0 +1,3 @@
[[processors.execd]]
command = ["go", "run", "testcases/pass-through.go"]
data_format = "influx"

View file

@ -0,0 +1,5 @@
cpu fields_usage_guest=0,fields_usage_guest_nice=0,fields_usage_idle=99.75000000049295,fields_usage_iowait=0,fields_usage_irq=0.1250000000007958,fields_usage_nice=0,fields_usage_softirq=0,fields_usage_steal=0,fields_usage_system=0,fields_usage_user=0.12500000000363798 1678124473000000000
cpu fields_usage_guest=0,fields_usage_guest_nice=0,fields_usage_idle=99.75000000049295,fields_usage_iowait=0,fields_usage_irq=0.1250000000007958,fields_usage_nice=0,fields_usage_softirq=0,fields_usage_steal=0,fields_usage_system=0,fields_usage_user=0.12500000000363798 1678124473000000000
cpu fields_usage_guest=0,fields_usage_guest_nice=0,fields_usage_idle=99.75000000049295,fields_usage_iowait=0,fields_usage_irq=0.1250000000007958,fields_usage_nice=0,fields_usage_softirq=0,fields_usage_steal=0,fields_usage_system=0,fields_usage_user=0.12500000000363798 1678124473000000000
disk fields_free=65652391936,fields_inodes_free=40445279,fields_inodes_total=45047808,fields_inodes_used=4602529,fields_total=725328994304,fields_used=622756728832,fields_used_percent=90.4631722684 1678124473000000000
disk fields_free=65652391936,fields_inodes_free=40445279,fields_inodes_total=45047808,fields_inodes_used=4602529,fields_total=725328994304,fields_used=622756728832,fields_used_percent=90.4631722684 1678124473000000000

View file

@ -0,0 +1,5 @@
cpu,cpu=cpu-total,host=Hugin usage_guest=0,usage_guest_nice=0,usage_idle=99.75000000049295,usage_iowait=0,usage_irq=0.1250000000007958,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0.12500000000363798 1678124473000000123
cpu,cpu=cpu-total,host=Munin usage_guest=0,usage_guest_nice=0,usage_idle=99.75000000049295,usage_iowait=0,usage_irq=0.1250000000007958,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0.12500000000363798 1678124473000000456
cpu,cpu=cpu-total,host=Thor usage_guest=0,usage_guest_nice=0,usage_idle=99.75000000049295,usage_iowait=0,usage_irq=0.1250000000007958,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0.12500000000363798 1678124473000000789
disk,device=nvme0n1p4,fstype=ext4,host=Hugin,mode=rw,path=/ free=65652391936i,inodes_free=40445279i,inodes_total=45047808i,inodes_used=4602529i,total=725328994304i,used=622756728832i,used_percent=90.4631722684 1678124473000000111
disk,device=nvme0n1p4,fstype=ext4,host=Munin,mode=rw,path=/ free=65652391936i,inodes_free=40445279i,inodes_total=45047808i,inodes_used=4602529i,total=725328994304i,used=622756728832i,used_percent=90.4631722684 1678124473000000222

View file

@ -0,0 +1,6 @@
[[processors.execd]]
command = ["go", "run", "testcases/pass-through.go"]
data_format = "json"
json_name_key = "name"
json_time_key = "timestamp"
json_time_format = "unix"

View file

@ -0,0 +1,5 @@
cpu,cpu=cpu-total,host=Hugin usage_guest=0,usage_guest_nice=0,usage_idle=99.75000000049295,usage_iowait=0,usage_irq=0.1250000000007958,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0.12500000000363798 1678124473000000123
cpu,cpu=cpu-total,host=Munin usage_guest=0,usage_guest_nice=0,usage_idle=99.75000000049295,usage_iowait=0,usage_irq=0.1250000000007958,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0.12500000000363798 1678124473000000456
cpu,cpu=cpu-total,host=Thor usage_guest=0,usage_guest_nice=0,usage_idle=99.75000000049295,usage_iowait=0,usage_irq=0.1250000000007958,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0.12500000000363798 1678124473000000789
disk,device=nvme0n1p4,fstype=ext4,host=Hugin,mode=rw,path=/ free=65652391936i,inodes_free=40445279i,inodes_total=45047808i,inodes_used=4602529i,total=725328994304i,used=622756728832i,used_percent=90.4631722684 1678124473000000111
disk,device=nvme0n1p4,fstype=ext4,host=Munin,mode=rw,path=/ free=65652391936i,inodes_free=40445279i,inodes_total=45047808i,inodes_used=4602529i,total=725328994304i,used=622756728832i,used_percent=90.4631722684 1678124473000000222

View file

@ -0,0 +1,5 @@
cpu,cpu=cpu-total,host=Hugin usage_guest=0,usage_guest_nice=0,usage_idle=99.75000000049295,usage_iowait=0,usage_irq=0.1250000000007958,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0.12500000000363798 1678124473000000123
cpu,cpu=cpu-total,host=Munin usage_guest=0,usage_guest_nice=0,usage_idle=99.75000000049295,usage_iowait=0,usage_irq=0.1250000000007958,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0.12500000000363798 1678124473000000456
cpu,cpu=cpu-total,host=Thor usage_guest=0,usage_guest_nice=0,usage_idle=99.75000000049295,usage_iowait=0,usage_irq=0.1250000000007958,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0.12500000000363798 1678124473000000789
disk,device=nvme0n1p4,fstype=ext4,host=Hugin,mode=rw,path=/ free=65652391936i,inodes_free=40445279i,inodes_total=45047808i,inodes_used=4602529i,total=725328994304i,used=622756728832i,used_percent=90.4631722684 1678124473000000111
disk,device=nvme0n1p4,fstype=ext4,host=Munin,mode=rw,path=/ free=65652391936i,inodes_free=40445279i,inodes_total=45047808i,inodes_used=4602529i,total=725328994304i,used=622756728832i,used_percent=90.4631722684 1678124473000000222

View file

@ -0,0 +1,2 @@
[[processors.execd]]
command = ["go", "run", "testcases/pass-through.go"]

View file

@ -0,0 +1,18 @@
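// pass-through.go is used by the testcases: it simply echoes every line
// received on stdin back to stdout, acting as a no-op execd processor.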
package main
import (
"bufio"
"fmt"
"os"
)
func main() {
var reader = bufio.NewReader(os.Stdin)
for {
message, err := reader.ReadString('\n')
if err != nil {
os.Exit(1)
}
fmt.Println(message)
}
}

View file

@ -0,0 +1,227 @@
# Filepath Processor Plugin
The `filepath` processor plugin maps certain Go functions from
[path/filepath](https://golang.org/pkg/path/filepath/) onto tag and field
values. Values can be modified in place or stored in another key.
Implemented functions are:
* [Base](https://golang.org/pkg/path/filepath/#Base) (accessible through `[[processors.filepath.basename]]`)
* [Rel](https://golang.org/pkg/path/filepath/#Rel) (accessible through `[[processors.filepath.rel]]`)
* [Dir](https://golang.org/pkg/path/filepath/#Dir) (accessible through `[[processors.filepath.dir]]`)
* [Clean](https://golang.org/pkg/path/filepath/#Clean) (accessible through `[[processors.filepath.clean]]`)
* [ToSlash](https://golang.org/pkg/path/filepath/#ToSlash) (accessible through `[[processors.filepath.toslash]]`)
On top of that, the plugin provides an extra function to retrieve the final path
component without its extension. This function is accessible through the
`[[processors.filepath.stem]]` configuration item.
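For reference, `stem` behaves like the following composition of
standard-library calls (a minimal sketch; the `stem` helper name is
illustrative, not part of the plugin's API):
```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// stem returns the final path component without its extension,
// mirroring the plugin's stem transformation.
func stem(path string) string {
	return strings.TrimSuffix(filepath.Base(path), filepath.Ext(path))
}

func main() {
	fmt.Println(stem("/var/log/batch/ajob.log")) // prints "ajob"
}
```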
Please note that, in this implementation, these functions are processed in the
order they appear above (except for `stem`, which is applied first).
Specify the `tag` and/or `field` that you want processed in each section and
optionally a `dest` if you want the result stored in a new tag or field.
If you plan to apply multiple transformations to the same `tag`/`field`, bear in
mind the processing order stated above.
Telegraf minimum version: Telegraf 1.15.0
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used to
modify metrics, tags, and fields or create aliases and configure ordering, etc.
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
## Configuration
```toml @sample.conf
# Performs file path manipulations on tags and fields
[[processors.filepath]]
## Treat the tag value as a path and convert it to its last element, storing the result in a new tag
# [[processors.filepath.basename]]
# tag = "path"
# dest = "basepath"
## Treat the field value as a path and keep all but the last element of path, typically the path's directory
# [[processors.filepath.dirname]]
# field = "path"
## Treat the tag value as a path, converting it to its last element without its suffix
# [[processors.filepath.stem]]
# tag = "path"
## Treat the tag value as a path, converting it to the shortest path name equivalent
## to path by purely lexical processing
# [[processors.filepath.clean]]
# tag = "path"
## Treat the tag value as a path, converting it to a relative path that is lexically
## equivalent to the source path when joined to 'base_path'
# [[processors.filepath.rel]]
# tag = "path"
# base_path = "/var/log"
## Treat the tag value as a path, replacing each separator character in path with a '/' character. Has only
## effect on Windows
# [[processors.filepath.toslash]]
# tag = "path"
```
## Considerations
### Clean Automatic Invocation
Even though `clean` is provided as a standalone function, it is also invoked by
the `rel` and `dirname` functions, so there is no need to combine it with
them.
That is:
```toml
[[processors.filepath]]
[[processors.filepath.dir]]
tag = "path"
[[processors.filepath.clean]]
tag = "path"
```
Is equivalent to:
```toml
[[processors.filepath]]
[[processors.filepath.dir]]
tag = "path"
```
### ToSlash Platform-specific Behavior
The effects of this function are only noticeable on Windows platforms, because
of the underlying Go implementation.
## Examples
### Basename
```toml
[[processors.filepath]]
[[processors.filepath.basename]]
tag = "path"
```
```diff
- my_metric,path="/var/log/batch/ajob.log" duration_seconds=134 1587920425000000000
+ my_metric,path="ajob.log" duration_seconds=134 1587920425000000000
```
### Dirname
```toml
[[processors.filepath]]
[[processors.filepath.dirname]]
field = "path"
dest = "folder"
```
```diff
- my_metric path="/var/log/batch/ajob.log",duration_seconds=134 1587920425000000000
+ my_metric path="/var/log/batch/ajob.log",folder="/var/log/batch",duration_seconds=134 1587920425000000000
```
### Stem
```toml
[[processors.filepath]]
[[processors.filepath.stem]]
tag = "path"
```
```diff
- my_metric,path="/var/log/batch/ajob.log" duration_seconds=134 1587920425000000000
+ my_metric,path="ajob" duration_seconds=134 1587920425000000000
```
### Clean
```toml
[[processors.filepath]]
[[processors.filepath.clean]]
tag = "path"
```
```diff
- my_metric,path="/var/log/dummy/../batch//ajob.log" duration_seconds=134 1587920425000000000
+ my_metric,path="/var/log/batch/ajob.log" duration_seconds=134 1587920425000000000
```
### Rel
```toml
[[processors.filepath]]
[[processors.filepath.rel]]
tag = "path"
base_path = "/var/log"
```
```diff
- my_metric,path="/var/log/batch/ajob.log" duration_seconds=134 1587920425000000000
+ my_metric,path="batch/ajob.log" duration_seconds=134 1587920425000000000
```
### ToSlash
```toml
[[processors.filepath]]
[[processors.filepath.toslash]]
tag = "path"
```
```diff
- my_metric,path="\var\log\batch\ajob.log" duration_seconds=134 1587920425000000000
+ my_metric,path="/var/log/batch/ajob.log" duration_seconds=134 1587920425000000000
```
## Processing paths from tail plugin
This plugin can be used together with the [tail input
plugin](../../inputs/tail/README.md) to make modifications to the `path` tag
injected for every file.
Scenario:
* A log file `/var/log/myjobs/mysql_backup.log`, containing logs for a job execution. Whenever the job ends, a line is
written to the log file following this format: `2020-04-05 11:45:21 total time execution: 70 seconds`
* We want to generate a measurement that captures the duration of the script as a field and includes the `path` as a
tag
* We are interested in the filename without its extensions, since it might be enough information for plotting our
execution times in a dashboard
* We don't want to overwrite the original path, just in case (if we somehow end
up with duplicates, this information helps tell them apart)
For this purpose, we will use the `tail` input plugin, the `grok` parser plugin
and the `filepath` processor.
```toml
# Performs file path manipulations on tags and fields
[[inputs.tail]]
files = ["/var/log/myjobs/**.log"]
data_format = "grok"
grok_patterns = ['%{TIMESTAMP_ISO8601:timestamp:ts-"2006-01-02 15:04:05"} total time execution: %{NUMBER:duration_seconds:int}']
name_override = "myjobs"
[[processors.filepath]]
[[processors.filepath.stem]]
tag = "path"
dest = "stempath"
```
The resulting output for a job taking 70 seconds for the mentioned log file
would look like:
```text
myjobs_duration_seconds,host="my-host",path="/var/log/myjobs/mysql_backup.log",stempath="mysql_backup" 70 1587920425000000000
```

View file

@ -0,0 +1,125 @@
//go:generate ../../../tools/readme_config_includer/generator
package filepath
import (
_ "embed"
"path/filepath"
"strings"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/processors"
)
//go:embed sample.conf
var sampleConfig string
type Options struct {
BaseName []BaseOpts `toml:"basename"`
DirName []BaseOpts `toml:"dirname"`
Stem []BaseOpts
Clean []BaseOpts
Rel []RelOpts
ToSlash []BaseOpts `toml:"toslash"`
Log telegraf.Logger `toml:"-"`
}
type ProcessorFunc func(s string) string
// BaseOpts contains options applicable to every function
type BaseOpts struct {
Field string
Tag string
Dest string
}
type RelOpts struct {
BaseOpts
BasePath string
}
// applyFunc applies the specified function to the metric
func applyFunc(bo BaseOpts, fn ProcessorFunc, metric telegraf.Metric) {
if bo.Tag != "" {
if v, ok := metric.GetTag(bo.Tag); ok {
targetTag := bo.Tag
if bo.Dest != "" {
targetTag = bo.Dest
}
metric.AddTag(targetTag, fn(v))
}
}
if bo.Field != "" {
if v, ok := metric.GetField(bo.Field); ok {
targetField := bo.Field
if bo.Dest != "" {
targetField = bo.Dest
}
// Only string fields are considered
if v, ok := v.(string); ok {
metric.AddField(targetField, fn(v))
}
}
}
}
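// stemFilePath returns the last element of path with its file extension
// removed.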
func stemFilePath(path string) string {
return strings.TrimSuffix(filepath.Base(path), filepath.Ext(path))
}
// processMetric processes fields and tag values for a given metric applying the selected transformations
func (o *Options) processMetric(metric telegraf.Metric) {
// Stem
for _, v := range o.Stem {
applyFunc(v, stemFilePath, metric)
}
// Basename
for _, v := range o.BaseName {
applyFunc(v, filepath.Base, metric)
}
// Rel
for _, v := range o.Rel {
applyFunc(v.BaseOpts, func(s string) string {
relPath, err := filepath.Rel(v.BasePath, s)
if err != nil {
o.Log.Errorf("filepath processor failed to process relative filepath %s: %v", s, err)
return v.BasePath
}
return relPath
}, metric)
}
// Dirname
for _, v := range o.DirName {
applyFunc(v, filepath.Dir, metric)
}
// Clean
for _, v := range o.Clean {
applyFunc(v, filepath.Clean, metric)
}
// ToSlash
for _, v := range o.ToSlash {
applyFunc(v, filepath.ToSlash, metric)
}
}
func (*Options) SampleConfig() string {
return sampleConfig
}
func (o *Options) Apply(in ...telegraf.Metric) []telegraf.Metric {
for _, m := range in {
o.processMetric(m)
}
return in
}
func init() {
processors.Add("filepath", func() telegraf.Processor {
return &Options{}
})
}

View file

@ -0,0 +1,134 @@
//go:build !windows
package filepath
import (
"sync"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/testutil"
)
var samplePath = "/my/test//c/../path/file.log"
func TestOptions_Apply(t *testing.T) {
tests := []testCase{
{
name: "Smoke Test",
o: newOptions("/my/test/"),
inputMetrics: getSmokeTestInputMetrics(samplePath),
expectedMetrics: []telegraf.Metric{
testutil.MustMetric(
smokeMetricName,
map[string]string{
"baseTag": "file.log",
"dirTag": "/my/test/path",
"stemTag": "file",
"cleanTag": "/my/test/path/file.log",
"relTag": "path/file.log",
"slashTag": "/my/test//c/../path/file.log",
},
map[string]interface{}{
"baseField": "file.log",
"dirField": "/my/test/path",
"stemField": "file",
"cleanField": "/my/test/path/file.log",
"relField": "path/file.log",
"slashField": "/my/test//c/../path/file.log",
},
time.Now()),
},
},
{
name: "Test Dest Option",
o: &Options{
BaseName: []BaseOpts{
{
Field: "sourcePath",
Tag: "sourcePath",
Dest: "basePath",
},
}},
inputMetrics: []telegraf.Metric{
testutil.MustMetric(
"testMetric",
map[string]string{"sourcePath": samplePath},
map[string]interface{}{"sourcePath": samplePath},
time.Now()),
},
expectedMetrics: []telegraf.Metric{
testutil.MustMetric(
"testMetric",
map[string]string{"sourcePath": samplePath, "basePath": "file.log"},
map[string]interface{}{"sourcePath": samplePath, "basePath": "file.log"},
time.Now()),
},
},
}
runTestOptionsApply(t, tests)
}
func TestTracking(t *testing.T) {
inputRaw := []telegraf.Metric{
metric.New(
"test",
map[string]string{"sourcePath": samplePath},
map[string]interface{}{"sourcePath": samplePath},
time.Unix(0, 0),
),
}
expected := []telegraf.Metric{
metric.New(
"test",
map[string]string{"sourcePath": samplePath, "basePath": "file.log"},
map[string]interface{}{"sourcePath": samplePath, "basePath": "file.log"},
time.Unix(0, 0),
),
}
var mu sync.Mutex
delivered := make([]telegraf.DeliveryInfo, 0, len(inputRaw))
notify := func(di telegraf.DeliveryInfo) {
mu.Lock()
defer mu.Unlock()
delivered = append(delivered, di)
}
input := make([]telegraf.Metric, 0, len(inputRaw))
for _, m := range inputRaw {
tm, _ := metric.WithTracking(m, notify)
input = append(input, tm)
}
plugin := &Options{
BaseName: []BaseOpts{
{
Field: "sourcePath",
Tag: "sourcePath",
Dest: "basePath",
},
},
}
// Process expected metrics and compare with resulting metrics
actual := plugin.Apply(input...)
testutil.RequireMetricsEqual(t, expected, actual)
// Simulate output acknowledging delivery
for _, m := range actual {
m.Accept()
}
// Check delivery
require.Eventuallyf(t, func() bool {
mu.Lock()
defer mu.Unlock()
return len(input) == len(delivered)
}, time.Second, 100*time.Millisecond, "%d delivered but %d expected", len(delivered), len(expected))
}

View file

@ -0,0 +1,100 @@
package filepath
import (
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
)
const smokeMetricName = "testmetric"
type testCase struct {
name string
o *Options
inputMetrics []telegraf.Metric
expectedMetrics []telegraf.Metric
}
func newOptions(basePath string) *Options {
return &Options{
BaseName: []BaseOpts{
{
Field: "baseField",
Tag: "baseTag",
},
},
DirName: []BaseOpts{
{
Field: "dirField",
Tag: "dirTag",
},
},
Stem: []BaseOpts{
{
Field: "stemField",
Tag: "stemTag",
},
},
Clean: []BaseOpts{
{
Field: "cleanField",
Tag: "cleanTag",
},
},
Rel: []RelOpts{
{
BaseOpts: BaseOpts{
Field: "relField",
Tag: "relTag",
},
BasePath: basePath,
},
},
ToSlash: []BaseOpts{
{
Field: "slashField",
Tag: "slashTag",
},
},
}
}
func getSampleMetricTags(path string) map[string]string {
return map[string]string{
"baseTag": path,
"dirTag": path,
"stemTag": path,
"cleanTag": path,
"relTag": path,
"slashTag": path,
}
}
func getSampleMetricFields(path string) map[string]interface{} {
return map[string]interface{}{
"baseField": path,
"dirField": path,
"stemField": path,
"cleanField": path,
"relField": path,
"slashField": path,
}
}
func getSmokeTestInputMetrics(path string) []telegraf.Metric {
return []telegraf.Metric{
testutil.MustMetric(smokeMetricName, getSampleMetricTags(path), getSampleMetricFields(path),
time.Now()),
}
}
func runTestOptionsApply(t *testing.T, tests []testCase) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := tt.o.Apply(tt.inputMetrics...)
testutil.RequireMetricsEqual(t, tt.expectedMetrics, got, testutil.SortMetrics(), testutil.IgnoreTime())
})
}
}

View file

@ -0,0 +1,43 @@
package filepath
import (
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
)
var samplePath = "c:\\my\\test\\\\c\\..\\path\\file.log"
func TestOptions_Apply(t *testing.T) {
tests := []testCase{
{
name: "Smoke Test",
o: newOptions("c:\\my\\test\\"),
inputMetrics: getSmokeTestInputMetrics(samplePath),
expectedMetrics: []telegraf.Metric{
testutil.MustMetric(
smokeMetricName,
map[string]string{
"baseTag": "file.log",
"dirTag": "c:\\my\\test\\path",
"stemTag": "file",
"cleanTag": "c:\\my\\test\\path\\file.log",
"relTag": "path\\file.log",
"slashTag": "c:/my/test//c/../path/file.log",
},
map[string]interface{}{
"baseField": "file.log",
"dirField": "c:\\my\\test\\path",
"stemField": "file",
"cleanField": "c:\\my\\test\\path\\file.log",
"relField": "path\\file.log",
"slashField": "c:/my/test//c/../path/file.log",
},
time.Now()),
},
},
}
runTestOptionsApply(t, tests)
}

View file

@ -0,0 +1,30 @@
# Performs file path manipulations on tags and fields
[[processors.filepath]]
## Treat the tag value as a path and convert it to its last element, storing the result in a new tag
# [[processors.filepath.basename]]
# tag = "path"
# dest = "basepath"
## Treat the field value as a path and keep all but the last element of path, typically the path's directory
# [[processors.filepath.dirname]]
# field = "path"
## Treat the tag value as a path, converting it to its last element without its suffix
# [[processors.filepath.stem]]
# tag = "path"
## Treat the tag value as a path, converting it to the shortest path name equivalent
## to path by purely lexical processing
# [[processors.filepath.clean]]
# tag = "path"
## Treat the tag value as a path, converting it to a relative path that is lexically
## equivalent to the source path when joined to 'base_path'
# [[processors.filepath.rel]]
# tag = "path"
# base_path = "/var/log"
## Treat the tag value as a path, replacing each separator character in path with a '/' character. Has only
## effect on Windows
# [[processors.filepath.toslash]]
# tag = "path"

View file

@ -0,0 +1,83 @@
# Filter Processor Plugin
The filter processor plugin allows you to specify a set of rules for metrics,
with the ability to _keep_ or _drop_ them. It does _not_ change the metric
itself. As such, you might want to apply this processor to remove metrics from
the processing/output stream.
__NOTE:__ The filtering is _not_ output-specific; it applies to all metrics
processed by this processor.
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used to
modify metrics, tags, and fields or create aliases and configure ordering, etc.
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
## Configuration
```toml @sample.conf
# Filter metrics by the given criteria
[[processors.filter]]
## Default action if no rule applies
# default = "pass"
## Rules to apply on the incoming metrics (multiple rules are possible)
## The rules are evaluated in order and the first matching rule is applied.
## In case no rule matches the "default" is applied.
## All filter criteria in a rule must apply for the rule to match the metric
## i.e. the criteria are combined by a logical AND. If a criterion is
## omitted, it is ignored and does not restrict the match.
[[processors.filter.rule]]
## List of metric names to match including glob expressions
# name = []
## List of tag key/values pairs to match including glob expressions
## ALL given tags keys must exist and at least one value must match
## for the metric to match the rule.
# tags = {}
## List of field keys to match including glob expressions
## At least one field must exist for the metric to match the rule.
# fields = []
## Action to apply for this rule
## "pass" will keep the metric and pass it on, while "drop" will remove
## the metric
# action = "drop"
```
## Examples
Consider a use-case where you collected the following metrics
```text
machine,source="machine1",status="OK" operating_hours=37i,temperature=23.1
machine,source="machine2",status="warning" operating_hours=1433i,temperature=48.9,message="too hot"
machine,source="machine3",status="OK" operating_hours=811i,temperature=29.5
machine,source="machine4",status="failure" operating_hours=1009i,temperature=67.3,message="temperature alert"
```
but only want to keep the ones indicating a `status` of `failure` or `warning`:
```toml
[[processors.filter]]
namepass = ["machine"]
default = "drop"
[[processors.filter.rule]]
tags = {"status" = ["warning", "failure"]}
action = "pass"
```
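With this configuration, only the metrics whose `status` tag matches one of the rule values survive:
```text
machine,source="machine2",status="warning" operating_hours=1433i,temperature=48.9,message="too hot"
machine,source="machine4",status="failure" operating_hours=1009i,temperature=67.3,message="temperature alert"
```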
Alternatively, you can "black-list" the `OK` value via
```toml
[[processors.filter]]
namepass = ["machine"]
[[processors.filter.rule]]
tags = {"status" = ["OK"]}
```
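Here metrics with a `status` of `OK` match the rule and are dropped (the default rule action), while all other metrics are kept by the implicit `default = "pass"`, yielding the same result as above.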

View file

@ -0,0 +1,75 @@
//go:generate ../../../tools/readme_config_includer/generator
package filter
import (
_ "embed"
"fmt"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/processors"
)
//go:embed sample.conf
var sampleConfig string
type Filter struct {
Rules []rule `toml:"rule"`
DefaultAction string `toml:"default"`
Log telegraf.Logger `toml:"-"`
defaultPass bool
}
func (*Filter) SampleConfig() string {
return sampleConfig
}
func (f *Filter) Init() error {
// Check the default-action setting
switch f.DefaultAction {
case "", "pass":
f.defaultPass = true
case "drop":
// Do nothing, those options are valid
if len(f.Rules) == 0 {
f.Log.Warn("dropping all metrics as no rule is provided")
}
default:
return fmt.Errorf("invalid default action %q", f.DefaultAction)
}
// Check and initialize rules
for i := range f.Rules {
if err := f.Rules[i].init(); err != nil {
return fmt.Errorf("initialization of rule %d failed: %w", i+1, err)
}
}
return nil
}
func (f *Filter) Apply(in ...telegraf.Metric) []telegraf.Metric {
out := make([]telegraf.Metric, 0, len(in))
for _, m := range in {
if f.applyRules(m) {
out = append(out, m)
} else {
m.Drop()
}
}
return out
}
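// applyRules checks the metric against all rules in order and returns whether
// it should be kept; the first matching rule wins, otherwise the default
// action applies.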
func (f *Filter) applyRules(m telegraf.Metric) bool {
for _, r := range f.Rules {
if pass, applies := r.apply(m); applies {
return pass
}
}
return f.defaultPass
}
func init() {
processors.Add("filter", func() telegraf.Processor {
return &Filter{}
})
}

View file

@ -0,0 +1,738 @@
package filter
import (
"sync"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/testutil"
)
var testmetrics = []telegraf.Metric{
metric.New(
"packing",
map[string]string{
"source": "machine A",
"location": "main building",
"status": "OK",
},
map[string]interface{}{
"operating_hours": 37,
"temperature": 23.1,
},
time.Unix(0, 0),
),
metric.New(
"foundry",
map[string]string{
"source": "machine B",
"location": "factory X",
"status": "OK",
},
map[string]interface{}{
"operating_hours": 1337,
"temperature": 19.9,
"pieces": 96878,
},
time.Unix(0, 0),
),
metric.New(
"welding",
map[string]string{
"source": "machine C",
"location": "factory X",
"status": "failure",
},
map[string]interface{}{
"operating_hours": 1009,
"temperature": 67.3,
"message": "temperature alert",
},
time.Unix(0, 0),
),
metric.New(
"welding",
map[string]string{
"source": "machine D",
"location": "factory Y",
"status": "OK",
},
map[string]interface{}{
"operating_hours": 825,
"temperature": 31.2,
},
time.Unix(0, 0),
),
}
func TestNoRules(t *testing.T) {
logger := &testutil.CaptureLogger{}
plugin := &Filter{
DefaultAction: "drop",
Log: logger,
}
require.NoError(t, plugin.Init())
warnings := logger.Warnings()
require.Len(t, warnings, 1)
require.Contains(t, warnings[0], "dropping all metrics")
}
func TestInvalidDefaultAction(t *testing.T) {
plugin := &Filter{
Rules: []rule{{Name: []string{"foo"}}},
DefaultAction: "foo",
}
require.ErrorContains(t, plugin.Init(), "invalid default action")
}
func TestNoMetric(t *testing.T) {
plugin := &Filter{
Rules: []rule{{Name: []string{"*"}}},
}
require.NoError(t, plugin.Init())
var input []telegraf.Metric
require.Empty(t, plugin.Apply(input...))
}
func TestDropAll(t *testing.T) {
plugin := &Filter{
Rules: []rule{{Name: []string{"*"}}},
}
require.NoError(t, plugin.Init())
require.Empty(t, plugin.Apply(testmetrics...))
}
func TestDropDefault(t *testing.T) {
plugin := &Filter{
Rules: []rule{{Name: []string{"foo"}, Action: "pass"}},
DefaultAction: "drop",
}
require.NoError(t, plugin.Init())
require.Empty(t, plugin.Apply(testmetrics...))
}
func TestPassAll(t *testing.T) {
plugin := &Filter{
Rules: []rule{{Name: []string{"*"}, Action: "pass"}},
DefaultAction: "drop",
}
require.NoError(t, plugin.Init())
expected := testmetrics
actual := plugin.Apply(testmetrics...)
testutil.RequireMetricsEqual(t, expected, actual)
}
func TestPassDefault(t *testing.T) {
plugin := &Filter{
Rules: []rule{{Name: []string{"foo"}, Action: "drop"}},
}
require.NoError(t, plugin.Init())
expected := testmetrics
actual := plugin.Apply(testmetrics...)
testutil.RequireMetricsEqual(t, expected, actual)
}
func TestNamePass(t *testing.T) {
plugin := &Filter{
Rules: []rule{
{
Name: []string{"welding"},
Action: "pass",
},
},
DefaultAction: "drop",
}
require.NoError(t, plugin.Init())
expected := []telegraf.Metric{
metric.New(
"welding",
map[string]string{
"source": "machine C",
"location": "factory X",
"status": "failure",
},
map[string]interface{}{
"operating_hours": 1009,
"temperature": 67.3,
"message": "temperature alert",
},
time.Unix(0, 0),
),
metric.New(
"welding",
map[string]string{
"source": "machine D",
"location": "factory Y",
"status": "OK",
},
map[string]interface{}{
"operating_hours": 825,
"temperature": 31.2,
},
time.Unix(0, 0),
),
}
actual := plugin.Apply(testmetrics...)
testutil.RequireMetricsEqual(t, expected, actual)
}
func TestNameDrop(t *testing.T) {
plugin := &Filter{
Rules: []rule{
{
Name: []string{"welding"},
Action: "drop",
},
},
}
require.NoError(t, plugin.Init())
expected := []telegraf.Metric{
metric.New(
"packing",
map[string]string{
"source": "machine A",
"location": "main building",
"status": "OK",
},
map[string]interface{}{
"operating_hours": 37,
"temperature": 23.1,
},
time.Unix(0, 0),
),
metric.New(
"foundry",
map[string]string{
"source": "machine B",
"location": "factory X",
"status": "OK",
},
map[string]interface{}{
"operating_hours": 1337,
"temperature": 19.9,
"pieces": 96878,
},
time.Unix(0, 0),
),
}
actual := plugin.Apply(testmetrics...)
testutil.RequireMetricsEqual(t, expected, actual)
}
func TestNameGlob(t *testing.T) {
plugin := &Filter{
Rules: []rule{
{
Name: []string{"*ing"},
Action: "drop",
},
},
}
require.NoError(t, plugin.Init())
expected := []telegraf.Metric{
metric.New(
"foundry",
map[string]string{
"source": "machine B",
"location": "factory X",
"status": "OK",
},
map[string]interface{}{
"operating_hours": 1337,
"temperature": 19.9,
"pieces": 96878,
},
time.Unix(0, 0),
),
}
actual := plugin.Apply(testmetrics...)
testutil.RequireMetricsEqual(t, expected, actual)
}
func TestTagPass(t *testing.T) {
plugin := &Filter{
Rules: []rule{
{
Tags: map[string][]string{"status": {"OK"}},
Action: "pass",
},
},
DefaultAction: "drop",
}
require.NoError(t, plugin.Init())
expected := []telegraf.Metric{
metric.New(
"packing",
map[string]string{
"source": "machine A",
"location": "main building",
"status": "OK",
},
map[string]interface{}{
"operating_hours": 37,
"temperature": 23.1,
},
time.Unix(0, 0),
),
metric.New(
"foundry",
map[string]string{
"source": "machine B",
"location": "factory X",
"status": "OK",
},
map[string]interface{}{
"operating_hours": 1337,
"temperature": 19.9,
"pieces": 96878,
},
time.Unix(0, 0),
),
metric.New(
"welding",
map[string]string{
"source": "machine D",
"location": "factory Y",
"status": "OK",
},
map[string]interface{}{
"operating_hours": 825,
"temperature": 31.2,
},
time.Unix(0, 0),
),
}
actual := plugin.Apply(testmetrics...)
testutil.RequireMetricsEqual(t, expected, actual)
}
func TestTagDrop(t *testing.T) {
plugin := &Filter{
Rules: []rule{
{
Tags: map[string][]string{"status": {"OK"}},
Action: "drop",
},
},
}
require.NoError(t, plugin.Init())
expected := []telegraf.Metric{
metric.New(
"welding",
map[string]string{
"source": "machine C",
"location": "factory X",
"status": "failure",
},
map[string]interface{}{
"operating_hours": 1009,
"temperature": 67.3,
"message": "temperature alert",
},
time.Unix(0, 0),
),
}
actual := plugin.Apply(testmetrics...)
testutil.RequireMetricsEqual(t, expected, actual)
}
func TestTagMultiple(t *testing.T) {
plugin := &Filter{
Rules: []rule{
{
Tags: map[string][]string{
"location": {"factory X", "factory Y"},
"status": {"OK"},
},
Action: "pass",
},
},
DefaultAction: "drop",
}
require.NoError(t, plugin.Init())
expected := []telegraf.Metric{
metric.New(
"foundry",
map[string]string{
"source": "machine B",
"location": "factory X",
"status": "OK",
},
map[string]interface{}{
"operating_hours": 1337,
"temperature": 19.9,
"pieces": 96878,
},
time.Unix(0, 0),
),
metric.New(
"welding",
map[string]string{
"source": "machine D",
"location": "factory Y",
"status": "OK",
},
map[string]interface{}{
"operating_hours": 825,
"temperature": 31.2,
},
time.Unix(0, 0),
),
}
actual := plugin.Apply(testmetrics...)
testutil.RequireMetricsEqual(t, expected, actual)
}
func TestTagGlob(t *testing.T) {
plugin := &Filter{
Rules: []rule{
{
Tags: map[string][]string{"location": {"factory *"}},
Action: "pass",
},
},
DefaultAction: "drop",
}
require.NoError(t, plugin.Init())
expected := []telegraf.Metric{
metric.New(
"foundry",
map[string]string{
"source": "machine B",
"location": "factory X",
"status": "OK",
},
map[string]interface{}{
"operating_hours": 1337,
"temperature": 19.9,
"pieces": 96878,
},
time.Unix(0, 0),
),
metric.New(
"welding",
map[string]string{
"source": "machine C",
"location": "factory X",
"status": "failure",
},
map[string]interface{}{
"operating_hours": 1009,
"temperature": 67.3,
"message": "temperature alert",
},
time.Unix(0, 0),
),
metric.New(
"welding",
map[string]string{
"source": "machine D",
"location": "factory Y",
"status": "OK",
},
map[string]interface{}{
"operating_hours": 825,
"temperature": 31.2,
},
time.Unix(0, 0),
),
}
actual := plugin.Apply(testmetrics...)
testutil.RequireMetricsEqual(t, expected, actual)
}
func TestTagDoesNotExist(t *testing.T) {
plugin := &Filter{
Rules: []rule{
{
Tags: map[string][]string{
"operator": {"peter"},
"status": {"OK"},
},
Action: "pass",
},
},
DefaultAction: "drop",
}
require.NoError(t, plugin.Init())
require.Empty(t, plugin.Apply(testmetrics...))
}
func TestFieldPass(t *testing.T) {
plugin := &Filter{
Rules: []rule{
{
Fields: []string{"message", "pieces"},
Action: "pass",
},
},
DefaultAction: "drop",
}
require.NoError(t, plugin.Init())
expected := []telegraf.Metric{
metric.New(
"foundry",
map[string]string{
"source": "machine B",
"location": "factory X",
"status": "OK",
},
map[string]interface{}{
"operating_hours": 1337,
"temperature": 19.9,
"pieces": 96878,
},
time.Unix(0, 0),
),
metric.New(
"welding",
map[string]string{
"source": "machine C",
"location": "factory X",
"status": "failure",
},
map[string]interface{}{
"operating_hours": 1009,
"temperature": 67.3,
"message": "temperature alert",
},
time.Unix(0, 0),
),
}
actual := plugin.Apply(testmetrics...)
testutil.RequireMetricsEqual(t, expected, actual)
}
func TestFieldDrop(t *testing.T) {
plugin := &Filter{
Rules: []rule{
{
Fields: []string{"message", "pieces"},
Action: "drop",
},
},
}
require.NoError(t, plugin.Init())
expected := []telegraf.Metric{
metric.New(
"packing",
map[string]string{
"source": "machine A",
"location": "main building",
"status": "OK",
},
map[string]interface{}{
"operating_hours": 37,
"temperature": 23.1,
},
time.Unix(0, 0),
),
metric.New(
"welding",
map[string]string{
"source": "machine D",
"location": "factory Y",
"status": "OK",
},
map[string]interface{}{
"operating_hours": 825,
"temperature": 31.2,
},
time.Unix(0, 0),
),
}
actual := plugin.Apply(testmetrics...)
testutil.RequireMetricsEqual(t, expected, actual)
}
func TestFieldGlob(t *testing.T) {
plugin := &Filter{
Rules: []rule{
{
Fields: []string{"{message,piece*}"},
Action: "pass",
},
},
DefaultAction: "drop",
}
require.NoError(t, plugin.Init())
expected := []telegraf.Metric{
metric.New(
"foundry",
map[string]string{
"source": "machine B",
"location": "factory X",
"status": "OK",
},
map[string]interface{}{
"operating_hours": 1337,
"temperature": 19.9,
"pieces": 96878,
},
time.Unix(0, 0),
),
metric.New(
"welding",
map[string]string{
"source": "machine C",
"location": "factory X",
"status": "failure",
},
map[string]interface{}{
"operating_hours": 1009,
"temperature": 67.3,
"message": "temperature alert",
},
time.Unix(0, 0),
),
}
actual := plugin.Apply(testmetrics...)
testutil.RequireMetricsEqual(t, expected, actual)
}
func TestRuleOrder(t *testing.T) {
plugin := &Filter{
Rules: []rule{
{
Name: []string{"welding"},
Action: "drop",
},
{
Name: []string{"welding"},
Action: "pass",
},
},
DefaultAction: "drop",
}
require.NoError(t, plugin.Init())
require.Empty(t, plugin.Apply(testmetrics...))
}
func TestRuleMultiple(t *testing.T) {
plugin := &Filter{
Rules: []rule{
{
Name: []string{"welding"},
Action: "drop",
},
{
Name: []string{"foundry"},
Action: "drop",
},
},
DefaultAction: "pass",
}
require.NoError(t, plugin.Init())
expected := []telegraf.Metric{
metric.New(
"packing",
map[string]string{
"source": "machine A",
"location": "main building",
"status": "OK",
},
map[string]interface{}{
"operating_hours": 37,
"temperature": 23.1,
},
time.Unix(0, 0),
),
}
actual := plugin.Apply(testmetrics...)
testutil.RequireMetricsEqual(t, expected, actual)
}
func TestTracking(t *testing.T) {
inputRaw := testmetrics
var mu sync.Mutex
delivered := make([]telegraf.DeliveryInfo, 0, len(inputRaw))
notify := func(di telegraf.DeliveryInfo) {
mu.Lock()
defer mu.Unlock()
delivered = append(delivered, di)
}
input := make([]telegraf.Metric, 0, len(inputRaw))
for _, m := range inputRaw {
tm, _ := metric.WithTracking(m.Copy(), notify)
input = append(input, tm)
}
expected := []telegraf.Metric{
metric.New(
"welding",
map[string]string{
"source": "machine C",
"location": "factory X",
"status": "failure",
},
map[string]interface{}{
"operating_hours": 1009,
"temperature": 67.3,
"message": "temperature alert",
},
time.Unix(0, 0),
),
metric.New(
"welding",
map[string]string{
"source": "machine D",
"location": "factory Y",
"status": "OK",
},
map[string]interface{}{
"operating_hours": 825,
"temperature": 31.2,
},
time.Unix(0, 0),
),
}
plugin := &Filter{
Rules: []rule{
{
Name: []string{"welding"},
Action: "pass",
},
},
DefaultAction: "drop",
}
require.NoError(t, plugin.Init())
// Process expected metrics and compare with resulting metrics
actual := plugin.Apply(input...)
testutil.RequireMetricsEqual(t, expected, actual)
// Simulate output acknowledging delivery
for _, m := range actual {
m.Accept()
}
// Check delivery
require.Eventuallyf(t, func() bool {
mu.Lock()
defer mu.Unlock()
return len(input) == len(delivered)
}, time.Second, 100*time.Millisecond, "%d delivered but %d expected", len(delivered), len(expected))
}

View file

@ -0,0 +1,87 @@
package filter
import (
"fmt"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/filter"
)
type rule struct {
Name []string `toml:"name"`
Tags map[string][]string `toml:"tags"`
Fields []string `toml:"fields"`
Action string `toml:"action"`
nameFilter filter.Filter
fieldFilter filter.Filter
tagFilters map[string]filter.Filter
pass bool
}
func (r *rule) init() error {
// Check the action setting
switch r.Action {
case "pass":
r.pass = true
case "", "drop":
// Do nothing, those options are valid
default:
return fmt.Errorf("invalid action %q", r.Action)
}
// Compile the filters
var err error
r.nameFilter, err = filter.Compile(r.Name)
if err != nil {
return fmt.Errorf("creating name filter failed: %w", err)
}
r.fieldFilter, err = filter.Compile(r.Fields)
if err != nil {
return fmt.Errorf("creating fields filter failed: %w", err)
}
r.tagFilters = make(map[string]filter.Filter, len(r.Tags))
for k, values := range r.Tags {
r.tagFilters[k], err = filter.Compile(values)
if err != nil {
return fmt.Errorf("creating tag filter for tag %q failed: %w", k, err)
}
}
return nil
}
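// apply matches the metric against the rule's criteria and reports whether
// the rule applies at all and, if so, whether the metric should pass.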
func (r *rule) apply(m telegraf.Metric) (pass, applies bool) {
// Check the metric name
if r.nameFilter != nil {
if !r.nameFilter.Match(m.Name()) {
return true, false
}
}
// Check the tags if given
tags := m.Tags()
for k, f := range r.tagFilters {
if value, found := tags[k]; !found || !f.Match(value) {
return true, false
}
}
// Check the field names
if r.fieldFilter != nil {
var matches bool
for _, field := range m.FieldList() {
if r.fieldFilter.Match(field.Key) {
matches = true
break
}
}
if !matches {
return true, false
}
}
return r.pass, true
}

View file

@ -0,0 +1,28 @@
# Filter metrics by the given criteria
[[processors.filter]]
## Default action if no rule applies
# default = "pass"
## Rules to apply on the incoming metrics (multiple rules are possible)
## The rules are evaluated in order and the first matching rule is applied.
## In case no rule matches the "default" is applied.
## All filter criteria in a rule must apply for the rule to match the metric,
## i.e. the criteria are combined by a logical AND. If a criterion is
## omitted, it is ignored when matching.
[[processors.filter.rule]]
## List of metric names to match including glob expressions
# name = []
## List of tag key/value pairs to match including glob expressions
## ALL given tag keys must exist and at least one value must match
## for the metric to match the rule.
# tags = {}
## List of field keys to match including glob expressions
## At least one of the given fields must exist for the metric to match the rule.
# fields = []
## Action to apply for this rule
## "pass" will keep the metric and pass it on, while "drop" will remove
## the metric
# action = "drop"

View file

@ -0,0 +1,102 @@
# Network Interface Name Processor Plugin
The `ifname` plugin looks up network interface names using SNMP.
Telegraf minimum version: Telegraf 1.15.0
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used to
modify metrics, tags, and fields, or create aliases and configure ordering, etc.
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
## Secret-store support
This plugin supports secrets from secret-stores for the `auth_password` and
`priv_password` options.
See the [secret-store documentation][SECRETSTORE] for more details on how
to use them.
[SECRETSTORE]: ../../../docs/CONFIGURATION.md#secret-store-secrets
## Configuration
```toml @sample.conf
# Add a tag of the network interface name looked up over SNMP by interface number
[[processors.ifname]]
## Name of tag holding the interface number
# tag = "ifIndex"
## Name of output tag where the interface name will be added
# dest = "ifName"
## Name of the tag containing the SNMP agent to request the interface name from
## example: agent = "source"
# agent = "agent"
## Timeout for each request.
# timeout = "5s"
## SNMP version; can be 1, 2, or 3.
# version = 2
## SNMP community string.
# community = "public"
## Number of retries to attempt.
# retries = 3
## The GETBULK max-repetitions parameter.
# max_repetitions = 10
## SNMPv3 authentication and encryption options.
##
## Security Name.
# sec_name = "myuser"
## Authentication protocol; one of "MD5", "SHA", or "".
# auth_protocol = "MD5"
## Authentication password.
# auth_password = "pass"
## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv".
# sec_level = "authNoPriv"
## Context Name.
# context_name = ""
## Privacy protocol used for encrypted messages; one of "DES", "AES" or "".
# priv_protocol = ""
## Privacy password used for encrypted messages.
# priv_password = ""
## max_parallel_lookups is the maximum number of SNMP requests to
## make at the same time.
# max_parallel_lookups = 100
## ordered controls whether or not the metrics need to stay in the
## same order this plugin received them in. If false, this plugin
## may change the order when data is cached. If you need metrics to
## stay in order, set this to true. Keeping the metrics ordered may
## be slightly slower.
# ordered = false
## cache_ttl is the amount of time interface names are cached for a
## given agent. After this period elapses, names will be retrieved
## again if needed.
# cache_ttl = "8h"
```
## Example
Example config:
```toml
[[processors.ifname]]
tag = "ifIndex"
dest = "ifName"
```
```diff
- foo,ifIndex=2,agent=127.0.0.1 field=123 1502489900000000000
+ foo,ifIndex=2,agent=127.0.0.1,ifName=eth0 field=123 1502489900000000000
```
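If downstream processing requires metrics to keep their original order, `ordered` can be enabled at the cost of some throughput, and the cache lifetime can be tuned via `cache_ttl`. A minimal sketch combining these options (the values are illustrative):
```toml
[[processors.ifname]]
  tag = "ifIndex"
  dest = "ifName"
  agent = "agent"
  ordered = true
  cache_ttl = "1h"
```
Internally the plugin first queries `ifName` (`1.3.6.1.2.1.31.1.1.1.1`) and falls back to `ifDescr` (`1.3.6.1.2.1.2.2.1.2`) if that request fails.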

View file

@ -0,0 +1,83 @@
package ifname
// See https://girai.dev/blog/lru-cache-implementation-in-go/
import (
"container/list"
)
type LRUValType = TTLValType
type hashType map[keyType]*list.Element
type LRUCache struct {
cap uint // capacity
l *list.List // doubly linked list
m hashType // hash table for checking if list node exists
}
// Pair is the value of a list node.
type Pair struct {
key keyType
value LRUValType
}
// NewLRUCache initializes a new LRUCache with the given capacity.
func NewLRUCache(capacity uint) LRUCache {
return LRUCache{
cap: capacity,
l: new(list.List),
m: make(hashType, capacity),
}
}
// Get returns the cached value for key and marks it as most recently used.
func (c *LRUCache) Get(key keyType) (LRUValType, bool) {
// check if list node exists
if node, ok := c.m[key]; ok {
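// the element stored in the map wraps the node created in Put, so
// unwrap twice to reach the Pair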
val := node.Value.(*list.Element).Value.(Pair).value
// move node to front
c.l.MoveToFront(node)
return val, true
}
return LRUValType{}, false
}
// Put inserts or updates the value for key, evicting the least recently used entry when the cache is full.
func (c *LRUCache) Put(key keyType, value LRUValType) {
// check if list node exists
if node, ok := c.m[key]; ok {
// move the node to front
c.l.MoveToFront(node)
// update the value of a list node
node.Value.(*list.Element).Value = Pair{key: key, value: value}
} else {
// delete the last list node if the list is full
if uint(c.l.Len()) == c.cap {
// get the key that we want to delete
idx := c.l.Back().Value.(*list.Element).Value.(Pair).key
// delete the node pointer in the hash map by key
delete(c.m, idx)
// remove the last list node
c.l.Remove(c.l.Back())
}
// initialize a list node
node := &list.Element{
Value: Pair{
key: key,
value: value,
},
}
// push the new list node into the list
ptr := c.l.PushFront(node)
// save the node pointer in the hash map
c.m[key] = ptr
}
}
func (c *LRUCache) Delete(key keyType) {
if node, ok := c.m[key]; ok {
c.l.Remove(node)
delete(c.m, key)
}
}

View file

@ -0,0 +1,23 @@
package ifname
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestCache(t *testing.T) {
c := NewLRUCache(2)
c.Put("ones", LRUValType{val: nameMap{1: "one"}})
twoMap := LRUValType{val: nameMap{2: "two"}}
c.Put("twos", twoMap)
c.Put("threes", LRUValType{val: nameMap{3: "three"}})
_, ok := c.Get("ones")
require.False(t, ok)
v, ok := c.Get("twos")
require.True(t, ok)
require.Equal(t, twoMap, v)
}

View file

@ -0,0 +1,330 @@
//go:generate ../../../tools/readme_config_includer/generator
package ifname
import (
_ "embed"
"errors"
"fmt"
"strconv"
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/internal/snmp"
"github.com/influxdata/telegraf/plugins/common/parallel"
"github.com/influxdata/telegraf/plugins/processors"
)
//go:embed sample.conf
var sampleConfig string
type nameMap map[uint64]string
type keyType = string
type valType = nameMap
type mapFunc func(agent string) (nameMap, error)
type sigMap map[string]chan struct{}
type IfName struct {
SourceTag string `toml:"tag"`
DestTag string `toml:"dest"`
AgentTag string `toml:"agent"`
snmp.ClientConfig
CacheSize uint `toml:"max_cache_entries"`
MaxParallelLookups int `toml:"max_parallel_lookups"`
Ordered bool `toml:"ordered"`
CacheTTL config.Duration `toml:"cache_ttl"`
Log telegraf.Logger `toml:"-"`
ifTable *snmp.Table
ifXTable *snmp.Table
cache *TTLCache
lock sync.Mutex
parallel parallel.Parallel
sigs sigMap
getMapRemote mapFunc
}
const minRetry = 5 * time.Minute
func (*IfName) SampleConfig() string {
return sampleConfig
}
func (d *IfName) Init() error {
d.getMapRemote = d.getMapRemoteNoMock
c := NewTTLCache(time.Duration(d.CacheTTL), d.CacheSize)
d.cache = &c
d.sigs = make(sigMap)
if _, err := snmp.NewWrapper(d.ClientConfig); err != nil {
return fmt.Errorf("parsing SNMP client config: %w", err)
}
return nil
}
func (d *IfName) addTag(metric telegraf.Metric) error {
agent, ok := metric.GetTag(d.AgentTag)
if !ok {
d.Log.Warn("Agent tag missing.")
return nil
}
numS, ok := metric.GetTag(d.SourceTag)
if !ok {
d.Log.Warn("Source tag missing.")
return nil
}
num, err := strconv.ParseUint(numS, 10, 64)
if err != nil {
return errors.New("couldn't parse source tag as uint")
}
firstTime := true
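// Try the cached table first; if the interface is missing and the cached
// entry is old enough, invalidate it and fetch the table once more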
for {
m, age, err := d.getMap(agent)
if err != nil {
return fmt.Errorf("couldn't retrieve the table of interface names for %s: %w", agent, err)
}
name, found := m[num]
if found {
// success
metric.AddTag(d.DestTag, name)
return nil
}
// We have the agent's interface map but it doesn't contain
// the interface we're interested in. If the entry is old
// enough, retrieve it from the agent once more.
if age < minRetry {
return fmt.Errorf("interface number %d isn't in the table of interface names on %s", num, agent)
}
if firstTime {
d.invalidate(agent)
firstTime = false
continue
}
// still not found even after invalidating the cache and retrying
return fmt.Errorf("missing interface but couldn't retrieve table for %v", agent)
}
}
func (d *IfName) invalidate(agent string) {
d.lock.Lock()
d.cache.Delete(agent)
d.lock.Unlock()
}
func (d *IfName) Start(acc telegraf.Accumulator) error {
var err error
d.ifTable, err = makeTable("1.3.6.1.2.1.2.2.1.2")
if err != nil {
return fmt.Errorf("preparing ifTable: %w", err)
}
d.ifXTable, err = makeTable("1.3.6.1.2.1.31.1.1.1.1")
if err != nil {
return fmt.Errorf("preparing ifXTable: %w", err)
}
fn := func(m telegraf.Metric) []telegraf.Metric {
err := d.addTag(m)
if err != nil {
d.Log.Debugf("Error adding tag: %v", err)
}
return []telegraf.Metric{m}
}
if d.Ordered {
d.parallel = parallel.NewOrdered(acc, fn, 10000, d.MaxParallelLookups)
} else {
d.parallel = parallel.NewUnordered(acc, fn, d.MaxParallelLookups)
}
return nil
}
func (d *IfName) Add(metric telegraf.Metric, _ telegraf.Accumulator) error {
d.parallel.Enqueue(metric)
return nil
}
func (d *IfName) Stop() {
d.parallel.Stop()
}
// getMap gets the interface names map either from cache or from the SNMP
// agent
func (d *IfName) getMap(agent string) (entry nameMap, age time.Duration, err error) {
var sig chan struct{}
d.lock.Lock()
// Check cache
m, ok, age := d.cache.Get(agent)
if ok {
d.lock.Unlock()
return m, age, nil
}
// cache miss. Is this the first request for this agent?
sig, found := d.sigs[agent]
if !found {
// This is the first request. Make signal for subsequent requests to wait on
s := make(chan struct{})
d.sigs[agent] = s
sig = s
}
d.lock.Unlock()
if found {
// This is not the first request. Wait for first to finish.
<-sig
// Check cache again
d.lock.Lock()
m, ok, age := d.cache.Get(agent)
d.lock.Unlock()
if ok {
return m, age, nil
}
return nil, 0, errors.New("getting remote table from cache")
}
// The cache missed and this is the first request for this
// agent. Make the SNMP request
m, err = d.getMapRemote(agent)
d.lock.Lock()
if err != nil {
// snmp failure. signal without saving to cache
close(sig)
delete(d.sigs, agent)
d.lock.Unlock()
return nil, 0, fmt.Errorf("getting remote table: %w", err)
}
// snmp success. Cache response, then signal any other waiting
// requests for this agent and clean up
d.cache.Put(agent, m)
close(sig)
delete(d.sigs, agent)
d.lock.Unlock()
return m, 0, nil
}
func (d *IfName) getMapRemoteNoMock(agent string) (nameMap, error) {
gs, err := snmp.NewWrapper(d.ClientConfig)
if err != nil {
return nil, fmt.Errorf("parsing SNMP client config: %w", err)
}
if err = gs.SetAgent(agent); err != nil {
return nil, fmt.Errorf("parsing agent tag: %w", err)
}
if err = gs.Connect(); err != nil {
return nil, fmt.Errorf("connecting when fetching interface names: %w", err)
}
// try ifXtable and ifName first. if that fails, fall back to
// ifTable and ifDescr
var m nameMap
if m, err = buildMap(gs, d.ifXTable); err == nil {
return m, nil
}
if m, err = buildMap(gs, d.ifTable); err == nil {
return m, nil
}
return nil, fmt.Errorf("fetching interface names: %w", err)
}
func init() {
processors.AddStreaming("ifname", func() telegraf.StreamingProcessor {
return &IfName{
SourceTag: "ifIndex",
DestTag: "ifName",
AgentTag: "agent",
CacheSize: 100,
MaxParallelLookups: 100,
ClientConfig: *snmp.DefaultClientConfig(),
CacheTTL: config.Duration(8 * time.Hour),
}
})
}
func makeTable(oid string) (*snmp.Table, error) {
var err error
tab := snmp.Table{
Name: "ifTable",
IndexAsTag: true,
Fields: []snmp.Field{
{Oid: oid, Name: "ifName"},
},
}
err = tab.Init(nil)
if err != nil {
// Init already wraps
return nil, err
}
return &tab, nil
}
func buildMap(gs snmp.GosnmpWrapper, tab *snmp.Table) (nameMap, error) {
var err error
rtab, err := tab.Build(gs, true)
if err != nil {
// Build already wraps
return nil, err
}
if len(rtab.Rows) == 0 {
return nil, errors.New("empty table")
}
t := make(nameMap)
for _, v := range rtab.Rows {
iStr, ok := v.Tags["index"]
if !ok {
// should always have an index tag because the table should
// always have IndexAsTag true
return nil, errors.New("no index tag")
}
i, err := strconv.ParseUint(iStr, 10, 64)
if err != nil {
return nil, errors.New("index tag isn't a uint")
}
nameIf, ok := v.Fields["ifName"]
if !ok {
return nil, errors.New("ifName field is missing")
}
name, ok := nameIf.(string)
if !ok {
return nil, errors.New("ifName field isn't a string")
}
t[i] = name
}
return t, nil
}

View file

@ -0,0 +1,232 @@
package ifname
import (
"sync"
"sync/atomic"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/internal/snmp"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/testutil"
)
type item struct {
entry nameMap
age time.Duration
err error
}
func TestTableIntegration(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
t.Skip("Skipping test due to connect failures")
d := IfName{}
err := d.Init()
require.NoError(t, err)
tab, err := makeTable("1.3.6.1.2.1.2.2.1.2")
require.NoError(t, err)
gs, err := snmp.NewWrapper(*snmp.DefaultClientConfig())
require.NoError(t, err)
err = gs.SetAgent("127.0.0.1")
require.NoError(t, err)
err = gs.Connect()
require.NoError(t, err)
// Could use ifIndex but oid index is always the same
m, err := buildMap(gs, tab)
require.NoError(t, err)
require.NotEmpty(t, m)
}
func TestIfNameIntegration(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
t.Skip("Skipping test due to connect failures")
d := IfName{
SourceTag: "ifIndex",
DestTag: "ifName",
AgentTag: "agent",
CacheSize: 1000,
ClientConfig: *snmp.DefaultClientConfig(),
}
err := d.Init()
require.NoError(t, err)
acc := testutil.Accumulator{}
err = d.Start(&acc)
require.NoError(t, err)
m := testutil.MustMetric(
"cpu",
map[string]string{
"ifIndex": "1",
"agent": "127.0.0.1",
},
map[string]interface{}{},
time.Unix(0, 0),
)
expected := testutil.MustMetric(
"cpu",
map[string]string{
"ifIndex": "1",
"agent": "127.0.0.1",
"ifName": "lo",
},
map[string]interface{}{},
time.Unix(0, 0),
)
err = d.addTag(m)
require.NoError(t, err)
testutil.RequireMetricEqual(t, expected, m)
}
func TestGetMap(t *testing.T) {
d := IfName{
CacheSize: 1000,
CacheTTL: config.Duration(10 * time.Second),
}
require.NoError(t, d.Init())
expected := nameMap{
1: "ifname1",
2: "ifname2",
}
var remoteCalls int32
// Mock the snmp transaction
d.getMapRemote = func(string) (nameMap, error) {
atomic.AddInt32(&remoteCalls, 1)
return expected, nil
}
m, age, err := d.getMap("agent")
require.NoError(t, err)
require.Zero(t, age) // Age is zero when map comes from getMapRemote
require.Equal(t, expected, m)
// Remote call should happen the first time getMap runs
require.Equal(t, int32(1), remoteCalls)
const thMax = 3
ch := make(chan item, thMax)
var wg sync.WaitGroup
for th := 0; th < thMax; th++ {
wg.Add(1)
go func() {
defer wg.Done()
m, age, err := d.getMap("agent")
ch <- item{entry: m, age: age, err: err}
}()
}
wg.Wait()
close(ch)
for entry := range ch {
require.NoError(t, entry.err)
require.NotZero(t, entry.age) // Age is nonzero when map comes from cache
require.Equal(t, expected, entry.entry)
}
// Remote call should not happen subsequent times getMap runs
require.Equal(t, int32(1), remoteCalls)
}
func TestTracking(t *testing.T) {
// Setup raw input and expected output
inputRaw := []telegraf.Metric{
metric.New(
"test",
map[string]string{"ifIndex": "1", "agent": "127.0.0.1"},
map[string]interface{}{"value": 42},
time.Unix(0, 0),
),
}
expected := []telegraf.Metric{
metric.New(
"test",
map[string]string{
"ifIndex": "1",
"agent": "127.0.0.1",
"ifName": "lo",
},
map[string]interface{}{"value": 42},
time.Unix(0, 0),
),
}
// Create fake notification for testing
var mu sync.Mutex
delivered := make([]telegraf.DeliveryInfo, 0, len(inputRaw))
notify := func(di telegraf.DeliveryInfo) {
mu.Lock()
defer mu.Unlock()
delivered = append(delivered, di)
}
// Convert raw input to tracking metric
input := make([]telegraf.Metric, 0, len(inputRaw))
for _, m := range inputRaw {
tm, _ := metric.WithTracking(m, notify)
input = append(input, tm)
}
// Prepare and start the plugin
plugin := &IfName{
SourceTag: "ifIndex",
DestTag: "ifName",
AgentTag: "agent",
CacheSize: 1000,
CacheTTL: config.Duration(10 * time.Second),
MaxParallelLookups: 100,
}
require.NoError(t, plugin.Init())
plugin.cache.Put("127.0.0.1", nameMap{1: "lo"})
var acc testutil.Accumulator
require.NoError(t, plugin.Start(&acc))
defer plugin.Stop()
// Process expected metrics and compare with resulting metrics
for _, in := range input {
require.NoError(t, plugin.Add(in, &acc))
}
require.Eventually(t, func() bool {
return int(acc.NMetrics()) >= len(expected)
}, 3*time.Second, 100*time.Microsecond)
actual := acc.GetTelegrafMetrics()
testutil.RequireMetricsEqual(t, expected, actual)
// Simulate output acknowledging delivery
for _, m := range actual {
m.Accept()
}
// Check delivery
require.Eventuallyf(t, func() bool {
mu.Lock()
defer mu.Unlock()
return len(input) == len(delivered)
}, time.Second, 100*time.Millisecond, "%d delivered but %d expected", len(delivered), len(expected))
}

View file

@ -0,0 +1,59 @@
# Add a tag of the network interface name looked up over SNMP by interface number
[[processors.ifname]]
## Name of tag holding the interface number
# tag = "ifIndex"
## Name of output tag where the interface name will be added
# dest = "ifName"
## Name of the tag containing the SNMP agent to request the interface name from
## example: agent = "source"
# agent = "agent"
## Timeout for each request.
# timeout = "5s"
## SNMP version; can be 1, 2, or 3.
# version = 2
## SNMP community string.
# community = "public"
## Number of retries to attempt.
# retries = 3
## The GETBULK max-repetitions parameter.
# max_repetitions = 10
## SNMPv3 authentication and encryption options.
##
## Security Name.
# sec_name = "myuser"
## Authentication protocol; one of "MD5", "SHA", or "".
# auth_protocol = "MD5"
## Authentication password.
# auth_password = "pass"
## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv".
# sec_level = "authNoPriv"
## Context Name.
# context_name = ""
## Privacy protocol used for encrypted messages; one of "DES", "AES" or "".
# priv_protocol = ""
## Privacy password used for encrypted messages.
# priv_password = ""
## max_parallel_lookups is the maximum number of SNMP requests to
## make at the same time.
# max_parallel_lookups = 100
## ordered controls whether or not the metrics need to stay in the
## same order this plugin received them in. If false, this plugin
## may change the order when data is cached. If you need metrics to
## stay in order, set this to true. Keeping the metrics ordered may
## be slightly slower.
# ordered = false
## cache_ttl is the amount of time interface names are cached for a
## given agent. After this period elapses, names will be retrieved
## again if needed.
# cache_ttl = "8h"

Some files were not shown because too many files have changed in this diff