Adding upstream version 1.34.4.

Signed-off-by: Daniel Baumann <daniel@debian.org>

parent e393c3af3f, commit 4978089aab
4963 changed files with 677545 additions and 0 deletions
139  plugins/processors/execd/README.md  Normal file
@@ -0,0 +1,139 @@
# Execd Processor Plugin

The `execd` processor plugin runs an external program as a separate process and
pipes metrics in to the process's STDIN and reads processed metrics from its
STDOUT. The program must accept influx line protocol on standard in (STDIN)
and output metrics in influx line protocol to standard output (STDOUT).

Program output on standard error is mirrored to the telegraf log.

Telegraf minimum version: Telegraf 1.15.0

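The simplest program that satisfies this contract is a pass-through that echoes
every line it receives straight back to STDOUT (the default `command = ["cat"]`
in the configuration below behaves this way, and the `testcases/pass-through.go`
helper added in this commit is essentially the same program). A minimal sketch
in Go, using only the standard library:

```go
package main

import (
    "bufio"
    "fmt"
    "os"
)

func main() {
    // Each line on STDIN is one metric in influx line protocol;
    // write it back to STDOUT unchanged.
    scanner := bufio.NewScanner(os.Stdin)
    for scanner.Scan() {
        fmt.Println(scanner.Text())
    }
}
```
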
## Caveats

- Metrics with tracking will be considered "delivered" as soon as they are passed
  to the external process. There is currently no way to match up which metric
  coming out of the execd process relates to which metric going in (keep in mind
  that processors can add and drop metrics, and that this is all done
  asynchronously).
- It is not currently possible to use a data_format other than "influx", because
  the format must be serialize-parse symmetrical and must not lose any critical
  type data.

## Global configuration options <!-- @/docs/includes/plugin_config.md -->

In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used to
modify metrics, tags, and fields, or to create aliases and configure ordering, etc.
See [CONFIGURATION.md][CONFIGURATION.md] for more details.

[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins

## Configuration

```toml @sample.conf
# Run executable as long-running processor plugin
[[processors.execd]]
  ## One program to run as daemon.
  ## NOTE: process and each argument should each be their own string
  ## eg: command = ["/path/to/your_program", "arg1", "arg2"]
  command = ["cat"]

  ## Environment variables
  ## Array of "key=value" pairs to pass as environment variables
  ## e.g. "KEY=value", "USERNAME=John Doe",
  ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs"
  # environment = []

  ## Delay before the process is restarted after an unexpected termination
  # restart_delay = "10s"

  ## Serialization format for communicating with the executed program
  ## Please note that the corresponding data-format must exist both in
  ## parsers and serializers
  # data_format = "influx"
```

## Example

### Go daemon example

This Go daemon reads a metric from STDIN, multiplies the "count" field by 2,
and writes the metric back out.

```go
package main

import (
    "fmt"
    "os"

    "github.com/influxdata/telegraf/plugins/parsers/influx"
    serializers_influx "github.com/influxdata/telegraf/plugins/serializers/influx"
)

func main() {
    parser := influx.NewStreamParser(os.Stdin)
    serializer := serializers_influx.Serializer{}
    if err := serializer.Init(); err != nil {
        fmt.Fprintf(os.Stderr, "serializer init failed: %v\n", err)
        os.Exit(1)
    }

    for {
        metric, err := parser.Next()
        if err != nil {
            if err == influx.EOF {
                return // stream ended
            }
            if parseErr, isParseError := err.(*influx.ParseError); isParseError {
                fmt.Fprintf(os.Stderr, "parse ERR %v\n", parseErr)
                os.Exit(1)
            }
            fmt.Fprintf(os.Stderr, "ERR %v\n", err)
            os.Exit(1)
        }

        c, found := metric.GetField("count")
        if !found {
            fmt.Fprintf(os.Stderr, "metric has no count field\n")
            os.Exit(1)
        }
        switch t := c.(type) {
        case float64:
            t *= 2
            metric.AddField("count", t)
        case int64:
            t *= 2
            metric.AddField("count", t)
        default:
            fmt.Fprintf(os.Stderr, "count has an unknown type, it's a %T\n", c)
            os.Exit(1)
        }
        b, err := serializer.Serialize(metric)
        if err != nil {
            fmt.Fprintf(os.Stderr, "ERR %v\n", err)
            os.Exit(1)
        }
        fmt.Fprint(os.Stdout, string(b))
    }
}
```

To run it, build the binary with Go, e.g. `go build -o multiplier.exe main.go`,
and point the plugin at it:

```toml
[[processors.execd]]
  command = ["multiplier.exe"]
```

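To sanity-check the daemon outside of Telegraf, you can drive it the same way
the plugin does: write one metric in influx line protocol to its STDIN and read
the processed metric back from STDOUT. A minimal sketch, assuming the
`multiplier.exe` binary built above and a made-up sample metric:

```go
package main

import (
    "bufio"
    "fmt"
    "io"
    "log"
    "os/exec"
)

func main() {
    // Assumed binary name from the build step above.
    cmd := exec.Command("./multiplier.exe")
    stdin, err := cmd.StdinPipe()
    if err != nil {
        log.Fatal(err)
    }
    stdout, err := cmd.StdoutPipe()
    if err != nil {
        log.Fatal(err)
    }
    if err := cmd.Start(); err != nil {
        log.Fatal(err)
    }

    // One metric in influx line protocol; the daemon should double "count".
    io.WriteString(stdin, "test,city=Toronto count=1i 1678124473000000123\n")
    stdin.Close() // EOF lets the daemon exit once the input is processed

    line, _ := bufio.NewReader(stdout).ReadString('\n')
    fmt.Print(line) // expect: test,city=Toronto count=2i 1678124473000000123
    cmd.Wait()
}
```
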
### Ruby daemon

- See [Ruby daemon](./examples/multiplier_line_protocol/multiplier_line_protocol.rb)

```toml
[[processors.execd]]
  command = ["ruby", "plugins/processors/execd/examples/multiplier_line_protocol/multiplier_line_protocol.rb"]
```
@@ -0,0 +1,14 @@
[agent]
  interval = "10s"

[[inputs.execd]]
  command = ["ruby", "plugins/inputs/execd/examples/count.rb"]

[[processors.execd]]
  command = ["ruby", "plugins/processors/execd/examples/multiplier_line_protocol/multiplier_line_protocol.rb"]


[[outputs.file]]
  files = ["stdout"]
  data_format = "influx"

@@ -0,0 +1,27 @@
#!/usr/bin/env ruby

loop do
  # example input: "counter_ruby count=0 1586302128978187000"
  line = STDIN.readline.chomp
  # Parse out the influx line protocol sections with a really simple hand-rolled parser that doesn't support escaping.
  # For a full line-protocol parser in Ruby, check out something like the influxdb-lineprotocol-parser gem.
  parts = line.split(" ")
  case parts.size
  when 3
    measurement, fields, timestamp = parts
  when 4
    measurement, tags, fields, timestamp = parts
  else
    STDERR.puts "Unable to parse line protocol"
    exit 1
  end
  fields = fields.split(",").map{|t|
    k,v = t.split("=")
    if k == "count"
      v = v.to_i * 2 # multiply the count field by two
    end
    "#{k}=#{v}"
  }.join(",")
  puts [measurement, tags, fields, timestamp].select{|s| s && s.size != 0 }.join(" ")
  STDOUT.flush
end

184  plugins/processors/execd/execd.go  Normal file
@@ -0,0 +1,184 @@
//go:generate ../../../tools/readme_config_includer/generator
package execd

import (
    "bufio"
    _ "embed"
    "errors"
    "fmt"
    "io"
    "strings"
    "time"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/config"
    "github.com/influxdata/telegraf/internal/process"
    "github.com/influxdata/telegraf/models"
    "github.com/influxdata/telegraf/plugins/parsers/influx"
    "github.com/influxdata/telegraf/plugins/processors"
)

//go:embed sample.conf
var sampleConfig string

type Execd struct {
    Command      []string        `toml:"command"`
    Environment  []string        `toml:"environment"`
    RestartDelay config.Duration `toml:"restart_delay"`
    Log          telegraf.Logger

    parser     telegraf.Parser
    serializer telegraf.Serializer
    acc        telegraf.Accumulator
    process    *process.Process
}

func (e *Execd) SetParser(p telegraf.Parser) {
    e.parser = p
}

func (e *Execd) SetSerializer(s telegraf.Serializer) {
    e.serializer = s
}

func (*Execd) SampleConfig() string {
    return sampleConfig
}

func (e *Execd) Start(acc telegraf.Accumulator) error {
    e.acc = acc

    var err error
    e.process, err = process.New(e.Command, e.Environment)
    if err != nil {
        return fmt.Errorf("error creating new process: %w", err)
    }
    e.process.Log = e.Log
    e.process.RestartDelay = time.Duration(e.RestartDelay)
    e.process.ReadStdoutFn = e.cmdReadOut
    e.process.ReadStderrFn = e.cmdReadErr

    if err = e.process.Start(); err != nil {
        // if there was only one argument, and it contained spaces, warn the user
        // that they may have configured it wrong.
        if len(e.Command) == 1 && strings.Contains(e.Command[0], " ") {
            e.Log.Warn("The processors.execd Command contained spaces but no arguments. " +
                "This setting expects the program and arguments as an array of strings, " +
                "not as a space-delimited string. See the plugin readme for an example.")
        }
        return fmt.Errorf("failed to start process %s: %w", e.Command, err)
    }

    return nil
}

func (e *Execd) Add(m telegraf.Metric, _ telegraf.Accumulator) error {
    b, err := e.serializer.Serialize(m)
    if err != nil {
        return fmt.Errorf("metric serializing error: %w", err)
    }

    _, err = e.process.Stdin.Write(b)
    if err != nil {
        return fmt.Errorf("error writing to process stdin: %w", err)
    }

    // We cannot maintain tracking metrics at the moment because input/output
    // is done asynchronously and we don't have any metric metadata to tie the
    // output metric back to the original input metric.
    m.Accept()
    return nil
}

func (e *Execd) Stop() {
    e.process.Stop()
}

func (e *Execd) cmdReadOut(out io.Reader) {
    // Prefer using the StreamParser when parsing influx format.
    var parser telegraf.Parser
    if rp, ok := e.parser.(*models.RunningParser); ok {
        parser = rp.Parser
    } else {
        parser = e.parser
    }

    if _, isInfluxParser := parser.(*influx.Parser); isInfluxParser {
        e.cmdReadOutStream(out)
        return
    }

    scanner := bufio.NewScanner(out)
    scanBuf := make([]byte, 4096)
    scanner.Buffer(scanBuf, 262144)

    for scanner.Scan() {
        metrics, err := e.parser.Parse(scanner.Bytes())
        if err != nil {
            e.Log.Errorf("Parse error: %s", err)
        }

        for _, metric := range metrics {
            e.acc.AddMetric(metric)
        }
    }

    if err := scanner.Err(); err != nil {
        e.Log.Errorf("Error reading stdout: %s", err)
    }
}

func (e *Execd) cmdReadOutStream(out io.Reader) {
    parser := influx.NewStreamParser(out)

    for {
        metric, err := parser.Next()

        if err != nil {
            // Stop parsing when we've reached the end.
            if errors.Is(err, influx.EOF) {
                break
            }

            var parseErr *influx.ParseError
            if errors.As(err, &parseErr) {
                // Continue past parse errors.
                e.acc.AddError(parseErr)
                continue
            }

            // Stop reading on any non-recoverable error.
            e.acc.AddError(err)
            return
        }

        e.acc.AddMetric(metric)
    }
}

func (e *Execd) cmdReadErr(out io.Reader) {
    scanner := bufio.NewScanner(out)

    for scanner.Scan() {
        e.Log.Errorf("stderr: %q", scanner.Text())
    }

    if err := scanner.Err(); err != nil {
        e.Log.Errorf("Error reading stderr: %s", err)
    }
}

func (e *Execd) Init() error {
    if len(e.Command) == 0 {
        return errors.New("no command specified")
    }
    return nil
}

func init() {
    processors.AddStreaming("execd", func() telegraf.StreamingProcessor {
        return &Execd{
            RestartDelay: config.Duration(10 * time.Second),
        }
    })
}

492  plugins/processors/execd/execd_test.go  Normal file
@@ -0,0 +1,492 @@
package execd

import (
    "errors"
    "flag"
    "fmt"
    "os"
    "path/filepath"
    "strings"
    "sync"
    "testing"
    "time"

    "github.com/stretchr/testify/require"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/config"
    "github.com/influxdata/telegraf/metric"
    _ "github.com/influxdata/telegraf/plugins/parsers/all"
    "github.com/influxdata/telegraf/plugins/parsers/influx"
    "github.com/influxdata/telegraf/plugins/processors"
    _ "github.com/influxdata/telegraf/plugins/serializers/all"
    serializers_influx "github.com/influxdata/telegraf/plugins/serializers/influx"
    "github.com/influxdata/telegraf/testutil"
)

func TestExternalProcessorWorks(t *testing.T) {
    // Determine name of the test executable for mocking an external program
    exe, err := os.Executable()
    require.NoError(t, err)

    // Setup the plugin
    plugin := &Execd{
        Command: []string{
            exe,
            "-case", "multiply",
            "-field", "count",
        },
        Environment:  []string{"PLUGINS_PROCESSORS_EXECD_MODE=application"},
        RestartDelay: config.Duration(5 * time.Second),
        Log:          testutil.Logger{},
    }

    // Setup the parser and serializer in the processor
    parser := &influx.Parser{}
    require.NoError(t, parser.Init())
    plugin.SetParser(parser)

    serializer := &serializers_influx.Serializer{}
    require.NoError(t, serializer.Init())
    plugin.SetSerializer(serializer)

    // Setup the input and expected output metrics
    now := time.Now()
    var input []telegraf.Metric
    var expected []telegraf.Metric
    for i := 0; i < 10; i++ {
        m := metric.New(
            "test",
            map[string]string{"city": "Toronto"},
            map[string]interface{}{"population": 6000000, "count": 1},
            now.Add(time.Duration(i)),
        )
        input = append(input, m)

        e := m.Copy()
        e.AddField("count", 2)
        expected = append(expected, e)
    }

    // Perform the test and check the result
    var acc testutil.Accumulator
    require.NoError(t, plugin.Start(&acc))
    defer plugin.Stop()
    for _, m := range input {
        require.NoError(t, plugin.Add(m, &acc))
    }

    require.Eventually(t, func() bool {
        return acc.NMetrics() >= uint64(len(expected))
    }, 3*time.Second, 100*time.Millisecond)

    testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics())
}

func TestParseLinesWithNewLines(t *testing.T) {
    // Determine name of the test executable for mocking an external program
    exe, err := os.Executable()
    require.NoError(t, err)

    // Setup the plugin
    plugin := &Execd{
        Command: []string{
            exe,
            "-case", "multiply",
            "-field", "count",
        },
        Environment:  []string{"PLUGINS_PROCESSORS_EXECD_MODE=application"},
        RestartDelay: config.Duration(5 * time.Second),
        Log:          testutil.Logger{},
    }

    // Setup the parser and serializer in the processor
    parser := &influx.Parser{}
    require.NoError(t, parser.Init())
    plugin.SetParser(parser)

    serializer := &serializers_influx.Serializer{}
    require.NoError(t, serializer.Init())
    plugin.SetSerializer(serializer)

    // Setup the input and expected output metrics
    now := time.Now()
    input := metric.New(
        "test",
        map[string]string{
            "author": "Mr. Gopher",
        },
        map[string]interface{}{
            "phrase": "Gophers are amazing creatures.\nAbsolutely amazing.",
            "count":  3,
        },
        now,
    )
    expected := []telegraf.Metric{
        metric.New(
            "test",
            map[string]string{"author": "Mr. Gopher"},
            map[string]interface{}{
                "phrase": "Gophers are amazing creatures.\nAbsolutely amazing.",
                "count":  6,
            },
            now,
        ),
    }

    // Perform the test and check the result
    var acc testutil.Accumulator
    require.NoError(t, plugin.Start(&acc))
    defer plugin.Stop()
    require.NoError(t, plugin.Add(input, &acc))

    require.Eventually(t, func() bool {
        return acc.NMetrics() >= uint64(len(expected))
    }, 3*time.Second, 100*time.Millisecond)

    testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics())
}

func TestLongLinesForLineProtocol(t *testing.T) {
    // Determine name of the test executable for mocking an external program
    exe, err := os.Executable()
    require.NoError(t, err)

    // Setup the plugin
    plugin := &Execd{
        Command: []string{
            exe,
            "-case", "long",
            "-field", "long",
        },
        Environment:  []string{"PLUGINS_PROCESSORS_EXECD_MODE=application"},
        RestartDelay: config.Duration(5 * time.Second),
        Log:          testutil.Logger{},
    }

    // Setup the parser and serializer in the processor
    parser := &influx.Parser{}
    require.NoError(t, parser.Init())
    plugin.SetParser(parser)

    serializer := &serializers_influx.Serializer{}
    require.NoError(t, serializer.Init())
    plugin.SetSerializer(serializer)

    // Setup the input and expected output metrics
    now := time.Now()
    input := metric.New(
        "test",
        map[string]string{"author": "Mr. Gopher"},
        map[string]interface{}{"count": 3},
        now,
    )
    expected := []telegraf.Metric{
        metric.New(
            "test",
            map[string]string{"author": "Mr. Gopher"},
            map[string]interface{}{
                "long":  strings.Repeat("foobar", 280_000/6),
                "count": 3,
            },
            now,
        ),
    }

    // Perform the test and check the result
    var acc testutil.Accumulator
    require.NoError(t, plugin.Start(&acc))
    defer plugin.Stop()
    require.NoError(t, plugin.Add(input, &acc))

    require.Eventually(t, func() bool {
        return acc.NMetrics() >= uint64(len(expected))
    }, 3*time.Second, 100*time.Millisecond)

    testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics())
}

func TestCases(t *testing.T) {
    // Get all directories in testcases
    folders, err := os.ReadDir("testcases")
    require.NoError(t, err)

    // Make sure tests contains data
    require.NotEmpty(t, folders)

    // Set up for file inputs
    processors.AddStreaming("execd", func() telegraf.StreamingProcessor {
        return &Execd{RestartDelay: config.Duration(10 * time.Second)}
    })

    for _, f := range folders {
        // Only handle folders
        if !f.IsDir() {
            continue
        }

        fname := f.Name()
        t.Run(fname, func(t *testing.T) {
            testdataPath := filepath.Join("testcases", fname)
            configFilename := filepath.Join(testdataPath, "telegraf.conf")
            inputFilename := filepath.Join(testdataPath, "input.influx")
            expectedFilename := filepath.Join(testdataPath, "expected.out")

            // Get parser to parse input and expected output
            parser := &influx.Parser{}
            require.NoError(t, parser.Init())

            input, err := testutil.ParseMetricsFromFile(inputFilename, parser)
            require.NoError(t, err)

            expected, err := testutil.ParseMetricsFromFile(expectedFilename, parser)
            require.NoError(t, err)

            // Configure the plugin
            cfg := config.NewConfig()
            require.NoError(t, cfg.LoadConfig(configFilename))
            require.Len(t, cfg.Processors, 1, "wrong number of processors")
            plugin := cfg.Processors[0].Processor

            // Process the metrics
            var acc testutil.Accumulator
            require.NoError(t, plugin.Start(&acc))
            for _, m := range input {
                require.NoError(t, plugin.Add(m, &acc))
            }
            plugin.Stop()

            require.Eventually(t, func() bool {
                return acc.NMetrics() >= uint64(len(expected))
            }, time.Second, 100*time.Millisecond)

            // Check the expectations
            actual := acc.GetTelegrafMetrics()
            testutil.RequireMetricsEqual(t, expected, actual)
        })
    }
}

func TestTracking(t *testing.T) {
    now := time.Now()

    // Setup the raw input and expected output data
    inputRaw := []telegraf.Metric{
        metric.New(
            "test",
            map[string]string{
                "city": "Toronto",
            },
            map[string]interface{}{
                "population": 6000000,
                "count":      1,
            },
            now,
        ),
        metric.New(
            "test",
            map[string]string{
                "city": "Tokio",
            },
            map[string]interface{}{
                "population": 14000000,
                "count":      8,
            },
            now,
        ),
    }

    expected := []telegraf.Metric{
        metric.New(
            "test",
            map[string]string{
                "city": "Toronto",
            },
            map[string]interface{}{
                "population": 6000000,
                "count":      2,
            },
            now,
        ),
        metric.New(
            "test",
            map[string]string{
                "city": "Tokio",
            },
            map[string]interface{}{
                "population": 14000000,
                "count":      16,
            },
            now,
        ),
    }

    // Create a testing notifier
    var mu sync.Mutex
    delivered := make([]telegraf.DeliveryInfo, 0, len(inputRaw))
    notify := func(di telegraf.DeliveryInfo) {
        mu.Lock()
        defer mu.Unlock()
        delivered = append(delivered, di)
    }

    // Convert raw input to tracking metrics
    input := make([]telegraf.Metric, 0, len(inputRaw))
    for _, m := range inputRaw {
        tm, _ := metric.WithTracking(m, notify)
        input = append(input, tm)
    }

    // Setup the plugin
    exe, err := os.Executable()
    require.NoError(t, err)

    plugin := &Execd{
        Command: []string{
            exe,
            "-case", "multiply",
            "-field", "count",
        },
        Environment:  []string{"PLUGINS_PROCESSORS_EXECD_MODE=application"},
        RestartDelay: config.Duration(5 * time.Second),
        Log:          testutil.Logger{},
    }
    require.NoError(t, plugin.Init())

    parser := &influx.Parser{}
    require.NoError(t, parser.Init())
    plugin.SetParser(parser)

    serializer := &serializers_influx.Serializer{}
    require.NoError(t, serializer.Init())
    plugin.SetSerializer(serializer)

    var acc testutil.Accumulator
    require.NoError(t, plugin.Start(&acc))
    defer plugin.Stop()

    // Process expected metrics and compare with resulting metrics
    for _, in := range input {
        require.NoError(t, plugin.Add(in, &acc))
    }
    require.Eventually(t, func() bool {
        return int(acc.NMetrics()) >= len(expected)
    }, 3*time.Second, 100*time.Millisecond)

    actual := acc.GetTelegrafMetrics()
    testutil.RequireMetricsEqual(t, expected, actual)

    // Simulate output acknowledging delivery
    for _, m := range actual {
        m.Accept()
    }

    // Check delivery
    require.Eventuallyf(t, func() bool {
        mu.Lock()
        defer mu.Unlock()
        return len(input) == len(delivered)
    }, time.Second, 100*time.Millisecond, "%d delivered but %d expected", len(delivered), len(expected))
}

func TestMain(m *testing.M) {
    var testcase, field string
    flag.StringVar(&testcase, "case", "", "test-case to mock [multiply, long]")
    flag.StringVar(&field, "field", "count", "name of the field to multiply")
    flag.Parse()

    if os.Getenv("PLUGINS_PROCESSORS_EXECD_MODE") != "application" || testcase == "" {
        os.Exit(m.Run())
    }

    switch testcase {
    case "multiply":
        os.Exit(runTestCaseMultiply(field))
    case "long":
        os.Exit(runTestCaseLong(field))
    }
    os.Exit(5)
}

func runTestCaseMultiply(field string) int {
    parser := influx.NewStreamParser(os.Stdin)
    serializer := &serializers_influx.Serializer{}
    if err := serializer.Init(); err != nil {
        fmt.Fprintf(os.Stderr, "initialization ERR %v\n", err)
        return 1
    }

    for {
        m, err := parser.Next()
        if err != nil {
            if errors.Is(err, influx.EOF) {
                return 0
            }
            var parseErr *influx.ParseError
            if errors.As(err, &parseErr) {
                fmt.Fprintf(os.Stderr, "parse ERR %v\n", parseErr)
                return 1
            }
            fmt.Fprintf(os.Stderr, "ERR %v\n", err)
            return 1
        }

        c, found := m.GetField(field)
        if !found {
            fmt.Fprintf(os.Stderr, "metric has no field %q\n", field)
            return 1
        }
        switch t := c.(type) {
        case float64:
            m.AddField(field, t*2)
        case int64:
            m.AddField(field, t*2)
        default:
            fmt.Fprintf(os.Stderr, "%s has an unknown type, it's a %T\n", field, c)
            return 1
        }
        b, err := serializer.Serialize(m)
        if err != nil {
            fmt.Fprintf(os.Stderr, "ERR %v\n", err)
            return 1
        }
        fmt.Fprint(os.Stdout, string(b))
    }
}

func runTestCaseLong(field string) int {
    parser := influx.NewStreamParser(os.Stdin)
    serializer := &serializers_influx.Serializer{}
    if err := serializer.Init(); err != nil {
        fmt.Fprintf(os.Stderr, "initialization ERR %v\n", err)
        return 1
    }

    // Setup a field with a lot of characters to exceed the scanner limit
    long := strings.Repeat("foobar", 280_000/6)

    for {
        m, err := parser.Next()
        if err != nil {
            if errors.Is(err, influx.EOF) {
                return 0
            }
            var parseErr *influx.ParseError
            if errors.As(err, &parseErr) {
                fmt.Fprintf(os.Stderr, "parse ERR %v\n", parseErr)
                return 1
            }
            fmt.Fprintf(os.Stderr, "ERR %v\n", err)
            return 1
        }

        m.AddField(field, long)

        b, err := serializer.Serialize(m)
        if err != nil {
            fmt.Fprintf(os.Stderr, "ERR %v\n", err)
            return 1
        }
        fmt.Fprint(os.Stdout, string(b))
    }
}

20  plugins/processors/execd/sample.conf  Normal file
@@ -0,0 +1,20 @@
# Run executable as long-running processor plugin
[[processors.execd]]
  ## One program to run as daemon.
  ## NOTE: process and each argument should each be their own string
  ## eg: command = ["/path/to/your_program", "arg1", "arg2"]
  command = ["cat"]

  ## Environment variables
  ## Array of "key=value" pairs to pass as environment variables
  ## e.g. "KEY=value", "USERNAME=John Doe",
  ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs"
  # environment = []

  ## Delay before the process is restarted after an unexpected termination
  # restart_delay = "10s"

  ## Serialization format for communicating with the executed program
  ## Please note that the corresponding data-format must exist both in
  ## parsers and serializers
  # data_format = "influx"

@@ -0,0 +1,5 @@
cpu,cpu=cpu-total,host=Hugin usage_guest=0,usage_guest_nice=0,usage_idle=99.75000000049295,usage_iowait=0,usage_irq=0.1250000000007958,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0.12500000000363798 1678124473000000123
cpu,cpu=cpu-total,host=Munin usage_guest=0,usage_guest_nice=0,usage_idle=99.75000000049295,usage_iowait=0,usage_irq=0.1250000000007958,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0.12500000000363798 1678124473000000456
cpu,cpu=cpu-total,host=Thor usage_guest=0,usage_guest_nice=0,usage_idle=99.75000000049295,usage_iowait=0,usage_irq=0.1250000000007958,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0.12500000000363798 1678124473000000789
disk,device=nvme0n1p4,fstype=ext4,host=Hugin,mode=rw,path=/ free=65652391936i,inodes_free=40445279i,inodes_total=45047808i,inodes_used=4602529i,total=725328994304i,used=622756728832i,used_percent=90.4631722684 1678124473000000111
disk,device=nvme0n1p4,fstype=ext4,host=Munin,mode=rw,path=/ free=65652391936i,inodes_free=40445279i,inodes_total=45047808i,inodes_used=4602529i,total=725328994304i,used=622756728832i,used_percent=90.4631722684 1678124473000000222

@@ -0,0 +1,5 @@
cpu,cpu=cpu-total,host=Hugin usage_guest=0,usage_guest_nice=0,usage_idle=99.75000000049295,usage_iowait=0,usage_irq=0.1250000000007958,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0.12500000000363798 1678124473000000123
cpu,cpu=cpu-total,host=Munin usage_guest=0,usage_guest_nice=0,usage_idle=99.75000000049295,usage_iowait=0,usage_irq=0.1250000000007958,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0.12500000000363798 1678124473000000456
cpu,cpu=cpu-total,host=Thor usage_guest=0,usage_guest_nice=0,usage_idle=99.75000000049295,usage_iowait=0,usage_irq=0.1250000000007958,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0.12500000000363798 1678124473000000789
disk,device=nvme0n1p4,fstype=ext4,host=Hugin,mode=rw,path=/ free=65652391936i,inodes_free=40445279i,inodes_total=45047808i,inodes_used=4602529i,total=725328994304i,used=622756728832i,used_percent=90.4631722684 1678124473000000111
disk,device=nvme0n1p4,fstype=ext4,host=Munin,mode=rw,path=/ free=65652391936i,inodes_free=40445279i,inodes_total=45047808i,inodes_used=4602529i,total=725328994304i,used=622756728832i,used_percent=90.4631722684 1678124473000000222

@@ -0,0 +1,3 @@
[[processors.execd]]
  command = ["go", "run", "testcases/pass-through.go"]
  data_format = "influx"

@@ -0,0 +1,5 @@
cpu fields_usage_guest=0,fields_usage_guest_nice=0,fields_usage_idle=99.75000000049295,fields_usage_iowait=0,fields_usage_irq=0.1250000000007958,fields_usage_nice=0,fields_usage_softirq=0,fields_usage_steal=0,fields_usage_system=0,fields_usage_user=0.12500000000363798 1678124473000000000
cpu fields_usage_guest=0,fields_usage_guest_nice=0,fields_usage_idle=99.75000000049295,fields_usage_iowait=0,fields_usage_irq=0.1250000000007958,fields_usage_nice=0,fields_usage_softirq=0,fields_usage_steal=0,fields_usage_system=0,fields_usage_user=0.12500000000363798 1678124473000000000
cpu fields_usage_guest=0,fields_usage_guest_nice=0,fields_usage_idle=99.75000000049295,fields_usage_iowait=0,fields_usage_irq=0.1250000000007958,fields_usage_nice=0,fields_usage_softirq=0,fields_usage_steal=0,fields_usage_system=0,fields_usage_user=0.12500000000363798 1678124473000000000
disk fields_free=65652391936,fields_inodes_free=40445279,fields_inodes_total=45047808,fields_inodes_used=4602529,fields_total=725328994304,fields_used=622756728832,fields_used_percent=90.4631722684 1678124473000000000
disk fields_free=65652391936,fields_inodes_free=40445279,fields_inodes_total=45047808,fields_inodes_used=4602529,fields_total=725328994304,fields_used=622756728832,fields_used_percent=90.4631722684 1678124473000000000

@@ -0,0 +1,5 @@
cpu,cpu=cpu-total,host=Hugin usage_guest=0,usage_guest_nice=0,usage_idle=99.75000000049295,usage_iowait=0,usage_irq=0.1250000000007958,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0.12500000000363798 1678124473000000123
cpu,cpu=cpu-total,host=Munin usage_guest=0,usage_guest_nice=0,usage_idle=99.75000000049295,usage_iowait=0,usage_irq=0.1250000000007958,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0.12500000000363798 1678124473000000456
cpu,cpu=cpu-total,host=Thor usage_guest=0,usage_guest_nice=0,usage_idle=99.75000000049295,usage_iowait=0,usage_irq=0.1250000000007958,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0.12500000000363798 1678124473000000789
disk,device=nvme0n1p4,fstype=ext4,host=Hugin,mode=rw,path=/ free=65652391936i,inodes_free=40445279i,inodes_total=45047808i,inodes_used=4602529i,total=725328994304i,used=622756728832i,used_percent=90.4631722684 1678124473000000111
disk,device=nvme0n1p4,fstype=ext4,host=Munin,mode=rw,path=/ free=65652391936i,inodes_free=40445279i,inodes_total=45047808i,inodes_used=4602529i,total=725328994304i,used=622756728832i,used_percent=90.4631722684 1678124473000000222

@@ -0,0 +1,6 @@
[[processors.execd]]
  command = ["go", "run", "testcases/pass-through.go"]
  data_format = "json"
  json_name_key = "name"
  json_time_key = "timestamp"
  json_time_format = "unix"

5  plugins/processors/execd/testcases/defaults/expected.out  Normal file
@@ -0,0 +1,5 @@
cpu,cpu=cpu-total,host=Hugin usage_guest=0,usage_guest_nice=0,usage_idle=99.75000000049295,usage_iowait=0,usage_irq=0.1250000000007958,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0.12500000000363798 1678124473000000123
cpu,cpu=cpu-total,host=Munin usage_guest=0,usage_guest_nice=0,usage_idle=99.75000000049295,usage_iowait=0,usage_irq=0.1250000000007958,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0.12500000000363798 1678124473000000456
cpu,cpu=cpu-total,host=Thor usage_guest=0,usage_guest_nice=0,usage_idle=99.75000000049295,usage_iowait=0,usage_irq=0.1250000000007958,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0.12500000000363798 1678124473000000789
disk,device=nvme0n1p4,fstype=ext4,host=Hugin,mode=rw,path=/ free=65652391936i,inodes_free=40445279i,inodes_total=45047808i,inodes_used=4602529i,total=725328994304i,used=622756728832i,used_percent=90.4631722684 1678124473000000111
disk,device=nvme0n1p4,fstype=ext4,host=Munin,mode=rw,path=/ free=65652391936i,inodes_free=40445279i,inodes_total=45047808i,inodes_used=4602529i,total=725328994304i,used=622756728832i,used_percent=90.4631722684 1678124473000000222

5  plugins/processors/execd/testcases/defaults/input.influx  Normal file
@@ -0,0 +1,5 @@
cpu,cpu=cpu-total,host=Hugin usage_guest=0,usage_guest_nice=0,usage_idle=99.75000000049295,usage_iowait=0,usage_irq=0.1250000000007958,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0.12500000000363798 1678124473000000123
cpu,cpu=cpu-total,host=Munin usage_guest=0,usage_guest_nice=0,usage_idle=99.75000000049295,usage_iowait=0,usage_irq=0.1250000000007958,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0.12500000000363798 1678124473000000456
cpu,cpu=cpu-total,host=Thor usage_guest=0,usage_guest_nice=0,usage_idle=99.75000000049295,usage_iowait=0,usage_irq=0.1250000000007958,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0.12500000000363798 1678124473000000789
disk,device=nvme0n1p4,fstype=ext4,host=Hugin,mode=rw,path=/ free=65652391936i,inodes_free=40445279i,inodes_total=45047808i,inodes_used=4602529i,total=725328994304i,used=622756728832i,used_percent=90.4631722684 1678124473000000111
disk,device=nvme0n1p4,fstype=ext4,host=Munin,mode=rw,path=/ free=65652391936i,inodes_free=40445279i,inodes_total=45047808i,inodes_used=4602529i,total=725328994304i,used=622756728832i,used_percent=90.4631722684 1678124473000000222

@@ -0,0 +1,2 @@
[[processors.execd]]
  command = ["go", "run", "testcases/pass-through.go"]

18  plugins/processors/execd/testcases/pass-through.go  Normal file
@@ -0,0 +1,18 @@
package main

import (
    "bufio"
    "fmt"
    "os"
)

func main() {
    var reader = bufio.NewReader(os.Stdin)
    for {
        message, err := reader.ReadString('\n')
        if err != nil {
            os.Exit(1)
        }
        fmt.Println(message)
    }
}