Adding upstream version 1.34.4.
Signed-off-by: Daniel Baumann <daniel@debian.org>
This commit is contained in:
parent
e393c3af3f
commit
4978089aab
4963 changed files with 677545 additions and 0 deletions
91
plugins/outputs/remotefile/README.md
Normal file
91
plugins/outputs/remotefile/README.md
Normal file
|
@ -0,0 +1,91 @@
|
|||
# Remote File Output Plugin
|
||||
|
||||
This plugin writes metrics to files in a remote location using the
|
||||
[rclone library][rclone]. Currently the following backends are supported:
|
||||
|
||||
- `local`: [Local filesystem](https://rclone.org/local/)
|
||||
- `s3`: [Amazon S3 storage providers](https://rclone.org/s3/)
|
||||
- `sftp`: [Secure File Transfer Protocol](https://rclone.org/sftp/)
|
||||
|
||||
⭐ Telegraf v1.32.0
|
||||
🏷️ datastore
|
||||
💻 all
|
||||
|
||||
[rclone]: https://rclone.org
|
||||
|
||||
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
|
||||
|
||||
In addition to the plugin-specific configuration settings, plugins support
|
||||
additional global and plugin configuration settings. These settings are used to
|
||||
modify metrics, tags, and fields or create aliases and configure ordering, etc.
|
||||
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
|
||||
|
||||
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
|
||||
|
||||
## Secret-store support
|
||||
|
||||
This plugin supports secrets from secret-stores for the `remote` option.
|
||||
See the [secret-store documentation][SECRETSTORE] for more details on how
|
||||
to use them.
|
||||
|
||||
[SECRETSTORE]: ../../../docs/CONFIGURATION.md#secret-store-secrets
|
||||
|
||||
## Configuration
|
||||
|
||||
```toml @sample.conf
|
||||
# Send telegraf metrics to file(s) in a remote filesystem
|
||||
[[outputs.remotefile]]
|
||||
## Remote location according to https://rclone.org/#providers
|
||||
## Check the backend configuration options and specify them in
|
||||
## <backend type>[,<param1>=<value1>[,...,<paramN>=<valueN>]]:[root]
|
||||
## for example:
|
||||
## remote = 's3,provider=AWS,access_key_id=...,secret_access_key=...,session_token=...,region=us-east-1:mybucket'
|
||||
## By default, remote is the local current directory
|
||||
# remote = "local:"
|
||||
|
||||
## Files to write in the remote location
|
||||
## Each file can be a Golang template for generating the filename from metrics.
|
||||
## See https://pkg.go.dev/text/template for a reference and use the metric
|
||||
## name (`{{.Name}}`), tag values (`{{.Tag "name"}}`), field values
|
||||
## (`{{.Field "name"}}`) or the metric time (`{{.Time}}`) to derive the
|
||||
## filename.
|
||||
## The 'files' setting may contain directories relative to the root path
|
||||
## defined in 'remote'.
|
||||
files = ['{{.Name}}-{{.Time.Format "2006-01-02"}}']
|
||||
|
||||
## Use batch serialization format instead of line based delimiting.
|
||||
## The batch format allows for the production of non-line-based output formats
|
||||
## and may more efficiently encode metrics.
|
||||
# use_batch_format = false
|
||||
|
||||
## Cache settings
|
||||
## Time to wait for all writes to complete on shutdown of the plugin.
|
||||
# final_write_timeout = "10s"
|
||||
|
||||
## Time to wait between writing to a file and uploading to the remote location
|
||||
# cache_write_back = "5s"
|
||||
|
||||
## Maximum size of the cache on disk (infinite by default)
|
||||
# cache_max_size = -1
|
||||
|
||||
## Forget files after not being touched for longer than the given time
|
||||
## This is useful to prevent memory leaks when using time-based filenames
|
||||
## as it allows internal structures to be cleaned up.
|
||||
## Note: When writing to a file after it has been forgotten, the file is
|
||||
## treated as a new file which might cause file-headers to be appended
|
||||
## again by certain serializers like CSV.
|
||||
## By default files will be kept indefinitely.
|
||||
# forget_files_after = "0s"
|
||||
|
||||
## Data format to output.
|
||||
## Each data format has its own unique set of configuration options, read
|
||||
## more about them here:
|
||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
|
||||
data_format = "influx"
|
||||
```
|
||||
|
||||
## Available custom functions
|
||||
|
||||
The following functions can be used in the templates:
|
||||
|
||||
- `now`: returns the current time (example: `{{now.Format "2006-01-02"}}`)
|
8
plugins/outputs/remotefile/backends.go
Normal file
8
plugins/outputs/remotefile/backends.go
Normal file
|
@ -0,0 +1,8 @@
|
|||
package remotefile
|
||||
|
||||
import (
|
||||
// Register backends
|
||||
_ "github.com/rclone/rclone/backend/local"
|
||||
_ "github.com/rclone/rclone/backend/s3"
|
||||
_ "github.com/rclone/rclone/backend/sftp"
|
||||
)
|
273
plugins/outputs/remotefile/remotefile.go
Normal file
273
plugins/outputs/remotefile/remotefile.go
Normal file
|
@ -0,0 +1,273 @@
|
|||
//go:generate ../../../tools/readme_config_includer/generator
|
||||
package remotefile
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
_ "embed"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
"github.com/dustin/go-humanize"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/fspath"
|
||||
"github.com/rclone/rclone/vfs"
|
||||
"github.com/rclone/rclone/vfs/vfscommon"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/config"
|
||||
"github.com/influxdata/telegraf/plugins/outputs"
|
||||
)
|
||||
|
||||
//go:embed sample.conf
|
||||
var sampleConfig string
|
||||
|
||||
// File is the state of the remotefile output plugin. It writes metrics to
// files in a remote location through an rclone-backed virtual filesystem.
type File struct {
	// Remote is the rclone remote specification, e.g. "local:" or
	// "s3,provider=AWS,...:mybucket" (defaulted to "local" in Init).
	Remote config.Secret `toml:"remote"`
	// Files lists the filenames to write; each entry may be a Go
	// text/template evaluated against the metric.
	Files []string `toml:"files"`
	// FinalWriteTimeout bounds how long Close waits for pending writers.
	FinalWriteTimeout config.Duration `toml:"final_write_timeout"`
	// WriteBackInterval is the delay between a local cache write and the
	// upload to the remote location.
	WriteBackInterval config.Duration `toml:"cache_write_back"`
	// MaxCacheSize limits the on-disk cache size (unlimited if <= 0).
	MaxCacheSize config.Size `toml:"cache_max_size"`
	// UseBatchFormat selects batch serialization instead of per-metric
	// line-based serialization.
	UseBatchFormat bool `toml:"use_batch_format"`
	Trace          bool `toml:"trace" deprecated:"1.33.0;1.35.0;use 'log_level = \"trace\"' instead"`
	// ForgetFiles drops per-file bookkeeping (serializer, mtime) for files
	// untouched longer than this duration; zero keeps them forever.
	ForgetFiles config.Duration `toml:"forget_files_after"`
	Log         telegraf.Logger `toml:"-"`

	// root is the rclone virtual filesystem rooted at the remote location.
	root *vfs.VFS
	// fscancel cancels the context backing the remote filesystem.
	fscancel context.CancelFunc
	// vfsopts holds the VFS options derived from the plugin settings.
	vfsopts vfscommon.Options

	// templates are the parsed filename templates, parallel to Files.
	templates []*template.Template
	// serializerFunc creates a fresh serializer per output file.
	serializerFunc telegraf.SerializerFunc
	// serializers caches one serializer per filename (stateful serializers
	// such as CSV must not be shared between files).
	serializers map[string]telegraf.Serializer
	// modified records the last write time per filename for ForgetFiles.
	modified map[string]time.Time
}
|
||||
|
||||
// SampleConfig returns the embedded sample configuration of the plugin.
func (*File) SampleConfig() string {
	return sampleConfig
}
|
||||
|
||||
// SetSerializerFunc stores the factory used to create one serializer
// instance per output file (see Write).
func (f *File) SetSerializerFunc(sf telegraf.SerializerFunc) {
	f.serializerFunc = sf
}
|
||||
|
||||
// Init validates the configuration, applies defaults, prepares the VFS
// options, redirects rclone logging to the plugin logger, and parses the
// filename templates. It returns an error for an empty 'files' list or an
// unparsable template.
func (f *File) Init() error {
	if len(f.Files) == 0 {
		return errors.New("no files specified")
	}

	// Set defaults
	if f.Remote.Empty() {
		// Default to the local filesystem backend rooted at the current dir.
		if err := f.Remote.Set([]byte("local")); err != nil {
			return fmt.Errorf("setting default remote failed: %w", err)
		}
	}

	if f.FinalWriteTimeout <= 0 {
		f.FinalWriteTimeout = config.Duration(10 * time.Second)
	}

	// Prepare VFS options starting from rclone's global defaults
	f.vfsopts = vfscommon.Opt
	f.vfsopts.CacheMode = vfscommon.CacheModeWrites // required for appends
	if f.WriteBackInterval > 0 {
		f.vfsopts.WriteBack = fs.Duration(f.WriteBackInterval)
	}
	if f.MaxCacheSize > 0 {
		f.vfsopts.CacheMaxSize = fs.SizeSuffix(f.MaxCacheSize)
	}

	// Redirect logging
	// NOTE(review): fs.LogOutput is an rclone package-global, so the last
	// initialized plugin instance wins if multiple instances are configured.
	fs.LogOutput = func(level fs.LogLevel, text string) {
		if f.Trace {
			f.Log.Debugf("[%s] %s", level.String(), text)
		} else {
			f.Log.Tracef("[%s] %s", level.String(), text)
		}
	}

	// Setup custom template functions available in filename templates
	funcs := template.FuncMap{"now": time.Now}

	// Setup filename templates
	f.templates = make([]*template.Template, 0, len(f.Files))
	for _, ftmpl := range f.Files {
		tmpl, err := template.New(ftmpl).Funcs(funcs).Parse(ftmpl)
		if err != nil {
			return fmt.Errorf("parsing file template %q failed: %w", ftmpl, err)
		}
		f.templates = append(f.templates, tmpl)
	}

	f.serializers = make(map[string]telegraf.Serializer)
	f.modified = make(map[string]time.Time)

	return nil
}
|
||||
|
||||
func (f *File) Connect() error {
|
||||
remoteRaw, err := f.Remote.Get()
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting remote secret failed: %w", err)
|
||||
}
|
||||
remote := remoteRaw.String()
|
||||
remoteRaw.Destroy()
|
||||
|
||||
// Construct the underlying filesystem config
|
||||
parsed, err := fspath.Parse(remote)
|
||||
if err != nil {
|
||||
return fmt.Errorf("parsing remote failed: %w", err)
|
||||
}
|
||||
info, err := fs.Find(parsed.Name)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot find remote type %q: %w", parsed.Name, err)
|
||||
}
|
||||
|
||||
// Setup the remote virtual filesystem
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
rootfs, err := info.NewFs(ctx, parsed.Name, parsed.Path, fs.ConfigMap(info.Prefix, info.Options, parsed.Name, parsed.Config))
|
||||
if err != nil {
|
||||
cancel()
|
||||
return fmt.Errorf("creating remote failed: %w", err)
|
||||
}
|
||||
f.fscancel = cancel
|
||||
f.root = vfs.New(rootfs, &f.vfsopts)
|
||||
|
||||
// Force connection to make sure we actually can connect
|
||||
if _, err := f.root.Fs().List(ctx, "/"); err != nil {
|
||||
return err
|
||||
}
|
||||
total, used, free := f.root.Statfs()
|
||||
f.Log.Debugf("Connected to %s with %s total, %s used and %s free!",
|
||||
f.root.Fs().String(),
|
||||
humanize.Bytes(uint64(total)),
|
||||
humanize.Bytes(uint64(used)),
|
||||
humanize.Bytes(uint64(free)),
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close gracefully shuts down the virtual filesystem: it flushes the
// directory cache, waits (bounded by FinalWriteTimeout) for pending writers
// to finish uploading, shuts the VFS down and removes the on-disk cache.
// Finally it cancels the backend context. Safe to call more than once.
func (f *File) Close() error {
	// Gracefully shutting down the root VFS; the order matters: flush first,
	// then wait for in-flight uploads before tearing the VFS down.
	if f.root != nil {
		f.root.FlushDirCache()
		f.root.WaitForWriters(time.Duration(f.FinalWriteTimeout))
		f.root.Shutdown()
		if err := f.root.CleanUp(); err != nil {
			f.Log.Errorf("Cleaning up vfs failed: %v", err)
		}
		// Mark as closed so a second Close() is a no-op.
		f.root = nil
	}

	if f.fscancel != nil {
		f.fscancel()
		f.fscancel = nil
	}

	return nil
}
|
||||
|
||||
// Write groups the metrics by the filename each template produces,
// serializes every group with a per-file serializer, appends the result to
// the corresponding file in the remote VFS and finally expires bookkeeping
// for files untouched longer than ForgetFiles.
func (f *File) Write(metrics []telegraf.Metric) error {
	var buf bytes.Buffer

	// Group the metrics per output file
	groups := make(map[string][]telegraf.Metric)
	for _, raw := range metrics {
		// Unwrap tracking metrics so templates/serializers see the plain
		// metric; see issue #16045 referenced by the tests.
		m := raw
		if wm, ok := raw.(telegraf.UnwrappableMetric); ok {
			m = wm.Unwrap()
		}

		// A metric may match several templates and thus end up in several
		// files; template failures skip only that template, not the metric.
		for _, tmpl := range f.templates {
			buf.Reset()
			if err := tmpl.Execute(&buf, m); err != nil {
				f.Log.Errorf("Cannot create filename %q for metric %v: %v", tmpl.Name(), m, err)
				continue
			}
			fn := buf.String()
			groups[fn] = append(groups[fn], m)
		}
	}

	// Serialize the metric groups
	groupBuffer := make(map[string][]byte, len(groups))
	for fn, fnMetrics := range groups {
		// Lazily create one serializer per file; stateful serializers (e.g.
		// CSV with headers) must not be shared across files.
		if _, found := f.serializers[fn]; !found {
			var err error
			if f.serializers[fn], err = f.serializerFunc(); err != nil {
				return fmt.Errorf("creating serializer failed: %w", err)
			}
		}
		serializer := f.serializers[fn]

		if f.UseBatchFormat {
			serialized, err := serializer.SerializeBatch(fnMetrics)
			if err != nil {
				f.Log.Errorf("Could not serialize metrics: %v", err)
				continue
			}
			groupBuffer[fn] = serialized
		} else {
			for _, m := range fnMetrics {
				serialized, err := serializer.Serialize(m)
				if err != nil {
					f.Log.Debugf("Could not serialize metric: %v", err)
					continue
				}
				groupBuffer[fn] = append(groupBuffer[fn], serialized...)
			}
		}
	}

	// Write the files; the same timestamp is recorded for every file of
	// this batch and later compared against ForgetFiles.
	t := time.Now()
	for fn, serialized := range groupBuffer {
		// Make sure the directory exists
		dir := filepath.Dir(filepath.ToSlash(fn))
		if dir != "." && dir != "/" {
			// Make sure we keep the original path-separators
			if filepath.ToSlash(fn) != fn {
				dir = filepath.FromSlash(dir)
			}
			if err := f.root.MkdirAll(dir, os.FileMode(f.root.Opt.DirPerms)); err != nil {
				return fmt.Errorf("creating dir %q failed: %w", dir, err)
			}
		}

		// Open the file for appending or create a new one
		file, err := f.root.OpenFile(fn, os.O_APPEND|os.O_RDWR|os.O_CREATE, os.FileMode(f.root.Opt.FilePerms))
		if err != nil {
			return fmt.Errorf("opening file %q: %w", fn, err)
		}

		// Write the data
		if _, err := file.Write(serialized); err != nil {
			file.Close()
			return fmt.Errorf("writing metrics to file %q failed: %w", fn, err)
		}
		file.Close()

		f.modified[fn] = t
	}

	// Cleanup internal structures for old files to avoid unbounded growth
	// with time-based filenames; a forgotten file later re-appears as "new"
	// (headers may be written again by serializers like CSV).
	if f.ForgetFiles > 0 {
		for fn, tmod := range f.modified {
			if t.Sub(tmod) > time.Duration(f.ForgetFiles) {
				delete(f.serializers, fn)
				delete(f.modified, fn)
			}
		}
	}

	return nil
}
|
||||
|
||||
// init registers the plugin under the name "remotefile" in Telegraf's
// output-plugin registry.
func init() {
	outputs.Add("remotefile", func() telegraf.Output { return &File{} })
}
|
513
plugins/outputs/remotefile/remotefile_test.go
Normal file
513
plugins/outputs/remotefile/remotefile_test.go
Normal file
|
@ -0,0 +1,513 @@
|
|||
package remotefile
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/config"
|
||||
"github.com/influxdata/telegraf/metric"
|
||||
"github.com/influxdata/telegraf/plugins/serializers/csv"
|
||||
"github.com/influxdata/telegraf/plugins/serializers/influx"
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
)
|
||||
|
||||
// TestStaticFileCreation verifies that a single metric written through a
// static (non-templated) filename ends up in the expected file with
// influx line-protocol content. Close() is required to flush the cache.
func TestStaticFileCreation(t *testing.T) {
	input := []telegraf.Metric{
		metric.New(
			"test",
			map[string]string{"source": "localhost"},
			map[string]interface{}{"value": 42},
			time.Unix(1719410485, 0),
		),
	}
	expected := "test,source=localhost value=42i 1719410485000000000\n"

	tmpdir := t.TempDir()

	// Setup the plugin including the serializer
	plugin := &File{
		Remote:            config.NewSecret([]byte("local:" + tmpdir)),
		Files:             []string{"test"},
		WriteBackInterval: config.Duration(100 * time.Millisecond),
		Log:               &testutil.Logger{},
	}

	plugin.SetSerializerFunc(func() (telegraf.Serializer, error) {
		serializer := &influx.Serializer{}
		err := serializer.Init()
		return serializer, err
	})

	require.NoError(t, plugin.Init())
	require.NoError(t, plugin.Connect())
	defer plugin.Close()

	// Write the input metrics and close the plugin. This is required to
	// actually flush the data to disk
	require.NoError(t, plugin.Write(input))
	plugin.Close()

	// Check the result
	require.FileExists(t, filepath.Join(tmpdir, "test"))

	actual, err := os.ReadFile(filepath.Join(tmpdir, "test"))
	require.NoError(t, err)
	require.Equal(t, expected, string(actual))
}
|
||||
|
||||
// TestStaticFileAppend verifies that writing to an already-existing file
// appends to its content instead of truncating it.
func TestStaticFileAppend(t *testing.T) {
	input := []telegraf.Metric{
		metric.New(
			"test",
			map[string]string{"source": "localhost"},
			map[string]interface{}{"value": 42},
			time.Unix(1719410485, 0),
		),
	}
	expected := "test,source=remotehost value=23i 1719410465000000000\n"
	expected += "test,source=localhost value=42i 1719410485000000000\n"

	tmpdir := t.TempDir()

	// Create a file where we want to append to
	f, err := os.OpenFile(filepath.Join(tmpdir, "test"), os.O_CREATE|os.O_WRONLY, 0600)
	require.NoError(t, err)
	defer f.Close()
	_, err = f.WriteString("test,source=remotehost value=23i 1719410465000000000\n")
	require.NoError(t, err)
	f.Close()

	// Setup the plugin including the serializer
	plugin := &File{
		Remote:            config.NewSecret([]byte("local:" + tmpdir)),
		Files:             []string{"test"},
		WriteBackInterval: config.Duration(100 * time.Millisecond),
		Log:               &testutil.Logger{},
	}

	plugin.SetSerializerFunc(func() (telegraf.Serializer, error) {
		serializer := &influx.Serializer{}
		err := serializer.Init()
		return serializer, err
	})

	require.NoError(t, plugin.Init())
	require.NoError(t, plugin.Connect())
	defer plugin.Close()

	// Write the input metrics and close the plugin. This is required to
	// actually flush the data to disk
	require.NoError(t, plugin.Write(input))
	plugin.Close()

	// Check the result
	require.FileExists(t, filepath.Join(tmpdir, "test"))

	actual, err := os.ReadFile(filepath.Join(tmpdir, "test"))
	require.NoError(t, err)
	require.Equal(t, expected, string(actual))
}
|
||||
|
||||
// TestDynamicFiles verifies that templated filenames route metrics into
// per-tag/per-date files, that the write-back cache eventually persists
// them, and that subsequent writes append to the correct files.
func TestDynamicFiles(t *testing.T) {
	input := []telegraf.Metric{
		metric.New(
			"test",
			map[string]string{"source": "localhost"},
			map[string]interface{}{"value": 23},
			time.Unix(1719410465, 0),
		),
		metric.New(
			"test",
			map[string]string{"source": "remotehost"},
			map[string]interface{}{"value": 21},
			time.Unix(1719410465, 0),
		),
		metric.New(
			"test",
			map[string]string{"source": "localhost"},
			map[string]interface{}{"value": 42},
			time.Unix(1719410485, 0),
		),
		metric.New(
			"test",
			map[string]string{"source": "remotehost"},
			map[string]interface{}{"value": 66},
			time.Unix(1719410485, 0),
		),
		metric.New(
			"test",
			map[string]string{"source": "remotehost"},
			map[string]interface{}{"value": 55},
			time.Unix(1716310124, 0),
		),
		metric.New(
			"test",
			map[string]string{"source": "remotehost"},
			map[string]interface{}{"value": 1},
			time.Unix(1716310174, 0),
		),
	}
	// Expected file content keyed by the templated filename.
	expected := map[string][]string{
		"localhost-2024-06-26": {
			"test,source=localhost value=23i 1719410465000000000\n",
			"test,source=localhost value=42i 1719410485000000000\n",
		},
		"remotehost-2024-06-26": {
			"test,source=remotehost value=21i 1719410465000000000\n",
			"test,source=remotehost value=66i 1719410485000000000\n",
		},
		"remotehost-2024-05-21": {
			"test,source=remotehost value=55i 1716310124000000000\n",
			"test,source=remotehost value=1i 1716310174000000000\n",
		},
	}

	tmpdir := t.TempDir()

	// Setup the plugin including the serializer
	plugin := &File{
		Remote:            config.NewSecret([]byte("local:" + tmpdir)),
		Files:             []string{`{{.Tag "source"}}-{{.Time.Format "2006-01-02"}}`},
		WriteBackInterval: config.Duration(100 * time.Millisecond),
		Log:               &testutil.Logger{},
	}

	plugin.SetSerializerFunc(func() (telegraf.Serializer, error) {
		serializer := &influx.Serializer{}
		err := serializer.Init()
		return serializer, err
	})

	require.NoError(t, plugin.Init())
	require.NoError(t, plugin.Connect())
	defer plugin.Close()

	// Write the first batch of metrics wait for the data to settle to disk
	require.NoError(t, plugin.Write(input[:2]))
	require.Eventually(t, func() bool {
		_, err1 := os.Stat(filepath.Join(tmpdir, "localhost-2024-06-26"))
		_, err2 := os.Stat(filepath.Join(tmpdir, "remotehost-2024-06-26"))
		return err1 == nil && err2 == nil
	}, 5*time.Second, 100*time.Millisecond)

	// Check the result after the first batch: only the first line of each
	// file is expected so far.
	for _, fn := range []string{"localhost-2024-06-26", "remotehost-2024-06-26"} {
		tmpfn := filepath.Join(tmpdir, fn)
		require.FileExists(t, tmpfn)

		actual, err := os.ReadFile(tmpfn)
		require.NoError(t, err)
		require.Equal(t, expected[fn][0], string(actual))
	}

	require.NoError(t, plugin.Write(input[2:]))
	plugin.Close()

	// Check the result
	for fn, lines := range expected {
		expectedContent := strings.Join(lines, "")
		tmpfn := filepath.Join(tmpdir, fn)
		require.FileExists(t, tmpfn)

		actual, err := os.ReadFile(tmpfn)
		require.NoError(t, err)
		require.Equal(t, expectedContent, string(actual))
	}
}
|
||||
|
||||
// TestCustomTemplateFunctions verifies the custom `now` template function:
// the filename contains the current year while the metric keeps its own
// (older) timestamp.
func TestCustomTemplateFunctions(t *testing.T) {
	input := []telegraf.Metric{
		metric.New(
			"test",
			map[string]string{"source": "localhost"},
			map[string]interface{}{"value": 42},
			time.Unix(1587686400, 0),
		),
	}
	expected := "test,source=localhost value=42i 1587686400000000000\n"

	expectedFilename := fmt.Sprintf("test-%d", time.Now().Year())

	tmpdir := t.TempDir()

	// Setup the plugin including the serializer
	plugin := &File{
		Remote:            config.NewSecret([]byte("local:" + tmpdir)),
		Files:             []string{"test-{{now.Year}}"},
		WriteBackInterval: config.Duration(100 * time.Millisecond),
		Log:               &testutil.Logger{},
	}

	plugin.SetSerializerFunc(func() (telegraf.Serializer, error) {
		serializer := &influx.Serializer{}
		err := serializer.Init()
		return serializer, err
	})

	require.NoError(t, plugin.Init())
	require.NoError(t, plugin.Connect())
	defer plugin.Close()

	// Write the input metrics and close the plugin. This is required to
	// actually flush the data to disk
	require.NoError(t, plugin.Write(input))
	plugin.Close()

	// Check the result
	require.FileExists(t, filepath.Join(tmpdir, expectedFilename))

	actual, err := os.ReadFile(filepath.Join(tmpdir, expectedFilename))
	require.NoError(t, err)
	require.Equal(t, expected, string(actual))
}
|
||||
|
||||
// TestCSVSerialization verifies that each output file gets its own
// serializer instance: both CSV files must contain a header row, and the
// plugin must track one serializer and one modification time per file.
func TestCSVSerialization(t *testing.T) {
	input := []telegraf.Metric{
		metric.New(
			"test",
			map[string]string{"source": "a"},
			map[string]interface{}{"value": 42},
			time.Unix(1587686400, 0),
		),
		metric.New(
			"test",
			map[string]string{"source": "b"},
			map[string]interface{}{"value": 23},
			time.Unix(1587686400, 0),
		),
	}
	expected := map[string]string{
		"test-a.csv": "timestamp,measurement,source,value\n1587686400,test,a,42\n",
		"test-b.csv": "timestamp,measurement,source,value\n1587686400,test,b,23\n",
	}

	tmpdir := t.TempDir()

	// Setup the plugin including the serializer
	plugin := &File{
		Remote:            config.NewSecret([]byte("local:" + tmpdir)),
		Files:             []string{`test-{{.Tag "source"}}.csv`},
		WriteBackInterval: config.Duration(100 * time.Millisecond),
		Log:               &testutil.Logger{},
	}

	plugin.SetSerializerFunc(func() (telegraf.Serializer, error) {
		serializer := &csv.Serializer{Header: true}
		err := serializer.Init()
		return serializer, err
	})

	require.NoError(t, plugin.Init())
	require.NoError(t, plugin.Connect())
	defer plugin.Close()

	// Write the input metrics and close the plugin. This is required to
	// actually flush the data to disk
	require.NoError(t, plugin.Write(input))
	plugin.Close()

	// Check the result; normalize CRLF line endings produced by the CSV
	// serializer for platform-independent comparison.
	for expectedFilename, expectedContent := range expected {
		require.FileExists(t, filepath.Join(tmpdir, expectedFilename))
		buf, err := os.ReadFile(filepath.Join(tmpdir, expectedFilename))
		require.NoError(t, err)
		actual := strings.ReplaceAll(string(buf), "\r\n", "\n")
		require.Equal(t, expectedContent, actual)
	}

	require.Len(t, plugin.modified, 2)
	require.Contains(t, plugin.modified, "test-a.csv")
	require.Contains(t, plugin.modified, "test-b.csv")
	require.Len(t, plugin.serializers, 2)
	require.Contains(t, plugin.serializers, "test-a.csv")
	require.Contains(t, plugin.serializers, "test-b.csv")
}
|
||||
|
||||
// TestForgettingFiles verifies the forget_files_after housekeeping: after
// a write more than ForgetFiles after the first, the bookkeeping for the
// first file ("test-a.csv") is dropped and only the second remains.
func TestForgettingFiles(t *testing.T) {
	input := []telegraf.Metric{
		metric.New(
			"test",
			map[string]string{"source": "a"},
			map[string]interface{}{"value": 42},
			time.Unix(1587686400, 0),
		),
		metric.New(
			"test",
			map[string]string{"source": "b"},
			map[string]interface{}{"value": 23},
			time.Unix(1587686400, 0),
		),
	}

	tmpdir := t.TempDir()

	// Setup the plugin including the serializer; ForgetFiles is much
	// shorter than the sleep between the two writes below.
	plugin := &File{
		Remote:            config.NewSecret([]byte("local:" + tmpdir)),
		Files:             []string{`test-{{.Tag "source"}}.csv`},
		WriteBackInterval: config.Duration(100 * time.Millisecond),
		ForgetFiles:       config.Duration(10 * time.Millisecond),
		Log:               &testutil.Logger{},
	}

	plugin.SetSerializerFunc(func() (telegraf.Serializer, error) {
		serializer := &csv.Serializer{Header: true}
		err := serializer.Init()
		return serializer, err
	})

	require.NoError(t, plugin.Init())
	require.NoError(t, plugin.Connect())
	defer plugin.Close()

	// Write the input metrics and close the plugin. This is required to
	// actually flush the data to disk
	require.NoError(t, plugin.Write(input[:1]))
	time.Sleep(100 * time.Millisecond)
	require.NoError(t, plugin.Write(input[1:]))

	plugin.Close()

	// Check the result
	require.Len(t, plugin.modified, 1)
	require.Contains(t, plugin.modified, "test-b.csv")
	require.Len(t, plugin.serializers, 1)
	require.Contains(t, plugin.serializers, "test-b.csv")
}
|
||||
|
||||
// TestTrackingMetrics verifies that tracking metrics are unwrapped before
// templating/serialization (issue #16045) and that delivery notifications
// fire for every metric once accepted.
func TestTrackingMetrics(t *testing.T) {
	// see issue #16045
	inputRaw := []telegraf.Metric{
		metric.New(
			"test",
			map[string]string{"source": "localhost"},
			map[string]interface{}{"value": 23},
			time.Unix(1719410465, 0),
		),
		metric.New(
			"test",
			map[string]string{"source": "remotehost"},
			map[string]interface{}{"value": 21},
			time.Unix(1719410465, 0),
		),
		metric.New(
			"test",
			map[string]string{"source": "localhost"},
			map[string]interface{}{"value": 42},
			time.Unix(1719410485, 0),
		),
		metric.New(
			"test",
			map[string]string{"source": "remotehost"},
			map[string]interface{}{"value": 66},
			time.Unix(1719410485, 0),
		),
		metric.New(
			"test",
			map[string]string{"source": "remotehost"},
			map[string]interface{}{"value": 55},
			time.Unix(1716310124, 0),
		),
		metric.New(
			"test",
			map[string]string{"source": "remotehost"},
			map[string]interface{}{"value": 1},
			time.Unix(1716310174, 0),
		),
	}

	// Create tracking metrics as inputs for the test; the notify callback
	// records delivery info and is guarded by a mutex as it may be called
	// from another goroutine.
	var mu sync.Mutex
	delivered := make([]telegraf.DeliveryInfo, 0, len(inputRaw))
	notify := func(di telegraf.DeliveryInfo) {
		mu.Lock()
		defer mu.Unlock()
		delivered = append(delivered, di)
	}
	input := make([]telegraf.Metric, 0, len(inputRaw))
	for _, m := range inputRaw {
		tm, _ := metric.WithTracking(m, notify)
		input = append(input, tm)
	}

	// Create the expectations
	expected := map[string][]string{
		"localhost-2024-06-26": {
			"test,source=localhost value=23i 1719410465000000000\n",
			"test,source=localhost value=42i 1719410485000000000\n",
		},
		"remotehost-2024-06-26": {
			"test,source=remotehost value=21i 1719410465000000000\n",
			"test,source=remotehost value=66i 1719410485000000000\n",
		},
		"remotehost-2024-05-21": {
			"test,source=remotehost value=55i 1716310124000000000\n",
			"test,source=remotehost value=1i 1716310174000000000\n",
		},
	}

	// Prepare the output filesystem
	tmpdir := t.TempDir()

	// Setup the plugin including the serializer
	plugin := &File{
		Remote:            config.NewSecret([]byte("local:" + tmpdir)),
		Files:             []string{`{{.Tag "source"}}-{{.Time.Format "2006-01-02"}}`},
		WriteBackInterval: config.Duration(100 * time.Millisecond),
		Log:               &testutil.Logger{},
	}

	plugin.SetSerializerFunc(func() (telegraf.Serializer, error) {
		serializer := &influx.Serializer{}
		err := serializer.Init()
		return serializer, err
	})
	require.NoError(t, plugin.Init())
	require.NoError(t, plugin.Connect())
	defer plugin.Close()

	// Write the input metrics and close the plugin. This is required to
	// actually flush the data to disk
	require.NoError(t, plugin.Write(input))
	plugin.Close()

	// Wait for the data to settle to disk
	require.Eventually(t, func() bool {
		ok := true
		for fn := range expected {
			_, err := os.Stat(filepath.Join(tmpdir, fn))
			ok = ok && err == nil
		}
		return ok
	}, 5*time.Second, 100*time.Millisecond)

	// Check the result
	for fn, lines := range expected {
		tmpfn := filepath.Join(tmpdir, fn)
		require.FileExists(t, tmpfn)

		actual, err := os.ReadFile(tmpfn)
		require.NoError(t, err)
		require.Equal(t, strings.Join(lines, ""), string(actual))
	}

	// Simulate output acknowledging delivery
	for _, m := range input {
		m.Accept()
	}

	// Check delivery
	require.Eventuallyf(t, func() bool {
		mu.Lock()
		defer mu.Unlock()
		return len(input) == len(delivered)
	}, time.Second, 100*time.Millisecond, "%d delivered but %d expected", len(delivered), len(expected))
}
|
49
plugins/outputs/remotefile/sample.conf
Normal file
49
plugins/outputs/remotefile/sample.conf
Normal file
|
@ -0,0 +1,49 @@
|
|||
# Send telegraf metrics to file(s) in a remote filesystem
|
||||
[[outputs.remotefile]]
|
||||
## Remote location according to https://rclone.org/#providers
|
||||
## Check the backend configuration options and specify them in
|
||||
## <backend type>[,<param1>=<value1>[,...,<paramN>=<valueN>]]:[root]
|
||||
## for example:
|
||||
## remote = 's3,provider=AWS,access_key_id=...,secret_access_key=...,session_token=...,region=us-east-1:mybucket'
|
||||
## By default, remote is the local current directory
|
||||
# remote = "local:"
|
||||
|
||||
## Files to write in the remote location
|
||||
## Each file can be a Golang template for generating the filename from metrics.
|
||||
## See https://pkg.go.dev/text/template for a reference and use the metric
|
||||
## name (`{{.Name}}`), tag values (`{{.Tag "name"}}`), field values
|
||||
## (`{{.Field "name"}}`) or the metric time (`{{.Time}}`) to derive the
|
||||
## filename.
|
||||
## The 'files' setting may contain directories relative to the root path
|
||||
## defined in 'remote'.
|
||||
files = ['{{.Name}}-{{.Time.Format "2006-01-02"}}']
|
||||
|
||||
## Use batch serialization format instead of line based delimiting.
|
||||
## The batch format allows for the production of non-line-based output formats
|
||||
## and may more efficiently encode metrics.
|
||||
# use_batch_format = false
|
||||
|
||||
## Cache settings
|
||||
## Time to wait for all writes to complete on shutdown of the plugin.
|
||||
# final_write_timeout = "10s"
|
||||
|
||||
## Time to wait between writing to a file and uploading to the remote location
|
||||
# cache_write_back = "5s"
|
||||
|
||||
## Maximum size of the cache on disk (infinite by default)
|
||||
# cache_max_size = -1
|
||||
|
||||
## Forget files after not being touched for longer than the given time
|
||||
## This is useful to prevent memory leaks when using time-based filenames
|
||||
## as it allows internal structures to be cleaned up.
|
||||
## Note: When writing to a file after it has been forgotten, the file is
|
||||
## treated as a new file which might cause file-headers to be appended
|
||||
## again by certain serializers like CSV.
|
||||
## By default files will be kept indefinitely.
|
||||
# forget_files_after = "0s"
|
||||
|
||||
## Data format to output.
|
||||
## Each data format has its own unique set of configuration options, read
|
||||
## more about them here:
|
||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
|
||||
data_format = "influx"
|
Loading…
Add table
Add a link
Reference in a new issue