1
0
Fork 0

Adding upstream version 1.34.4.

Signed-off-by: Daniel Baumann <daniel@debian.org>
This commit is contained in:
Daniel Baumann 2025-05-24 07:26:29 +02:00
parent e393c3af3f
commit 4978089aab
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
4963 changed files with 677545 additions and 0 deletions

View file

@ -0,0 +1,109 @@
# Netfilter Conntrack Input Plugin
This plugin collects metrics from [Netfilter's conntrack tools][conntrack].
There are two collection mechanisms for this plugin:
1. Extracting information from `/proc/net/stat/nf_conntrack` files if the
`collect` option is set accordingly for finding CPU specific values.
2. Using specific files and directories by specifying the `dirs` option. At
runtime, conntrack exposes many of those connection statistics within
`/proc/sys/net`. Depending on your kernel version, these files can be found
in either `/proc/sys/net/ipv4/netfilter` or `/proc/sys/net/netfilter` and
will be prefixed with either `ip` or `nf`.
In order to simplify configuration in a heterogeneous environment, a superset
of directories and filenames can be specified. Any locations that do not exist
are ignored.
⭐ Telegraf v1.0.0
🏷️ system
💻 linux
[conntrack]: https://conntrack-tools.netfilter.org/
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used to
modify metrics, tags, and field or create aliases and configure ordering, etc.
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
## Configuration
```toml @sample.conf
# Collects conntrack stats from the configured directories and files.
# This plugin ONLY supports Linux
[[inputs.conntrack]]
## The following defaults would work with multiple versions of conntrack.
## Note the nf_ and ip_ filename prefixes are mutually exclusive across
## kernel versions, as are the directory locations.
## Look through /proc/net/stat/nf_conntrack for these metrics
## all - aggregated statistics
## percpu - include detailed statistics with cpu tag
collect = ["all", "percpu"]
## User-specified directories and files to look through
## Directories to search within for the conntrack files above.
## Missing directories will be ignored.
dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"]
## Superset of filenames to look for within the conntrack dirs.
## Missing files will be ignored.
files = ["ip_conntrack_count","ip_conntrack_max",
"nf_conntrack_count","nf_conntrack_max"]
```
## Metrics
A detailed explanation of each field can be found in the
[kernel documentation][kerneldoc].
[kerneldoc]: https://www.kernel.org/doc/Documentation/networking/nf_conntrack-sysctl.txt
- conntrack
- `ip_conntrack_count` `(int, count)`: The number of entries in the conntrack table
- `ip_conntrack_max` `(int, size)`: The max capacity of the conntrack table
- `ip_conntrack_buckets` `(int, size)`: The size of hash table.
With `collect = ["all"]`:
- `entries`: The number of entries in the conntrack table
- `searched`: The number of conntrack table lookups performed
- `found`: The number of searched entries which were successful
- `new`: The number of entries added which were not expected before
- `invalid`: The number of packets seen which can not be tracked
- `ignore`: The number of packets seen which are already connected to an entry
- `delete`: The number of entries which were removed
- `delete_list`: The number of entries which were put to dying list
- `insert`: The number of entries inserted into the list
- `insert_failed`: The number of insertion attempted but failed (duplicate entry)
- `drop`: The number of packets dropped due to conntrack failure
- `early_drop`: The number of dropped entries to make room for new ones, if
`maxsize` is reached
- `icmp_error`: Subset of invalid. Packets that can't be tracked due to error
- `expect_new`: Entries added after an expectation was already present
- `expect_create`: Expectations added
- `expect_delete`: Expectations deleted
- `search_restart`: Conntrack table lookups restarted due to hashtable resizes
### Tags
With `collect = ["percpu"]` will include detailed statistics per CPU thread.
Without `"percpu"` the `cpu` tag will have `all` value.
## Example Output
```text
conntrack,host=myhost ip_conntrack_count=2,ip_conntrack_max=262144 1461620427667995735
```
with stats:
```text
conntrack,cpu=all,host=localhost delete=0i,delete_list=0i,drop=2i,early_drop=0i,entries=5568i,expect_create=0i,expect_delete=0i,expect_new=0i,found=7i,icmp_error=1962i,ignore=2586413402i,insert=0i,insert_failed=2i,invalid=46853i,new=0i,search_restart=453336i,searched=0i 1615233542000000000
conntrack,host=localhost ip_conntrack_count=464,ip_conntrack_max=262144 1615233542000000000
```

View file

@ -0,0 +1,164 @@
//go:generate ../../../tools/readme_config_includer/generator
//go:build linux
package conntrack
import (
_ "embed"
"errors"
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal/choice"
"github.com/influxdata/telegraf/plugins/common/psutil"
"github.com/influxdata/telegraf/plugins/inputs"
)
//go:embed sample.conf
var sampleConfig string
var (
	// dfltDirs are the directories searched when the user does not set the
	// 'dirs' option; depending on kernel version conntrack exposes its
	// sysctl files in one or the other location.
	dfltDirs = []string{
		"/proc/sys/net/ipv4/netfilter",
		"/proc/sys/net/netfilter",
	}

	// dfltFiles are the filenames looked up when the user does not set the
	// 'files' option; the ip_ and nf_ prefixes are mutually exclusive
	// across kernel versions, so missing files are expected and ignored.
	dfltFiles = []string{
		"ip_conntrack_count",
		"ip_conntrack_max",
		"nf_conntrack_count",
		"nf_conntrack_max",
	}
)

const (
	// inputName is the plugin/measurement name registered with Telegraf.
	inputName = "conntrack"
)
// Conntrack collects Netfilter conntrack metrics from procfs files and,
// depending on the 'collect' option, aggregated or per-CPU statistics
// obtained through the psutil abstraction.
type Conntrack struct {
	Collect []string `toml:"collect"` // statistic sets to gather: "all" and/or "percpu"
	Dirs    []string `toml:"dirs"`    // directories searched for the conntrack files
	Files   []string `toml:"files"`   // filenames looked up within each directory

	// ps provides access to kernel conntrack statistics; injected so tests
	// can substitute a mock implementation.
	ps psutil.PS
}
// SampleConfig returns the embedded sample configuration for this plugin.
func (*Conntrack) SampleConfig() string {
	return sampleConfig
}
// Init fills in default directories/files where unset and validates that
// the 'collect' option only contains the supported values "all"/"percpu".
func (c *Conntrack) Init() error {
	c.setDefaults()

	if err := choice.CheckSlice(c.Collect, []string{"all", "percpu"}); err != nil {
		return fmt.Errorf("config option 'collect': %w", err)
	}

	return nil
}
// Gather reads the configured conntrack procfs files into gauge fields and,
// for each entry in 'collect', emits aggregated ("all") or per-CPU counter
// statistics retrieved via the psutil layer. It returns an error only when
// no procfs metric could be collected at all; per-file and per-stat problems
// are reported through the accumulator instead.
func (c *Conntrack) Gather(acc telegraf.Accumulator) error {
	fields := make(map[string]interface{})
	for _, dir := range c.Dirs {
		for _, file := range c.Files {
			// NOTE: no system will have both nf_ and ip_ prefixes,
			// so we're safe to branch on suffix only.
			parts := strings.SplitN(file, "_", 2)
			if len(parts) < 2 {
				continue
			}
			// Normalize to the ip_ prefix so nf_ and ip_ kernels produce
			// identical field names.
			metricKey := "ip_" + parts[1]

			fName := filepath.Join(dir, file)
			if _, err := os.Stat(fName); err != nil {
				// Missing files are expected across kernel versions.
				continue
			}

			contents, err := os.ReadFile(fName)
			if err != nil {
				acc.AddError(fmt.Errorf("failed to read file %q: %w", fName, err))
				continue
			}

			v := strings.TrimSpace(string(contents))
			value, err := strconv.ParseFloat(v, 64)
			if err != nil {
				// Do not record the field on parse failure; assigning here
				// would emit a bogus zero value and mask the "no metrics
				// collected" error below.
				acc.AddError(fmt.Errorf("failed to parse metric, expected number but found %q: %w", v, err))
				continue
			}
			fields[metricKey] = value
		}
	}

	for _, metric := range c.Collect {
		perCPU := metric == "percpu"
		stats, err := c.ps.NetConntrack(perCPU)
		if err != nil {
			// Skip ahead so a retrieval failure is not additionally
			// reported as "failed to collect stats" below.
			acc.AddError(fmt.Errorf("failed to retrieve conntrack statistics: %w", err))
			continue
		}
		if len(stats) == 0 {
			acc.AddError(errors.New("conntrack input failed to collect stats"))
			continue
		}

		cpuTag := "all"
		for i, sts := range stats {
			if perCPU {
				cpuTag = fmt.Sprintf("cpu%d", i)
			}
			tags := map[string]string{
				"cpu": cpuTag,
			}
			statFields := map[string]interface{}{
				"entries":        sts.Entries,       // entries in the conntrack table
				"searched":       sts.Searched,      // conntrack table lookups performed
				"found":          sts.Found,         // searched entries which were successful
				"new":            sts.New,           // entries added which were not expected before
				"invalid":        sts.Invalid,       // packets seen which can not be tracked
				"ignore":         sts.Ignore,        // packets seen which are already connected to an entry
				"delete":         sts.Delete,        // entries which were removed
				"delete_list":    sts.DeleteList,    // entries which were put to dying list
				"insert":         sts.Insert,        // entries inserted into the list
				"insert_failed":  sts.InsertFailed,  // insertion attempted but failed (same entry exists)
				"drop":           sts.Drop,          // packets dropped due to conntrack failure
				"early_drop":     sts.EarlyDrop,     // dropped entries to make room for new ones, if maxsize reached
				"icmp_error":     sts.IcmpError,     // Subset of invalid. Packets that can't be tracked d/t error
				"expect_new":     sts.ExpectNew,     // Entries added after an expectation was already present
				"expect_create":  sts.ExpectCreate,  // Expectations added
				"expect_delete":  sts.ExpectDelete,  // Expectations deleted
				"search_restart": sts.SearchRestart, // conntrack table lookups restarted due to hashtable resizes
			}
			acc.AddCounter(inputName, statFields, tags)
		}
	}

	if len(fields) == 0 {
		return errors.New("conntrack input failed to collect metrics, make sure that the kernel module is loaded")
	}
	acc.AddFields(inputName, fields, nil)
	return nil
}
// setDefaults backfills the standard conntrack search locations for any
// option the user left empty, leaving explicit configuration untouched.
func (c *Conntrack) setDefaults() {
	if len(c.Files) == 0 {
		c.Files = dfltFiles
	}
	if len(c.Dirs) == 0 {
		c.Dirs = dfltDirs
	}
}
// init registers the plugin with Telegraf, wiring in the real system
// psutil implementation for production use.
func init() {
	inputs.Add(inputName, func() telegraf.Input {
		return &Conntrack{
			ps: psutil.NewSystemPS(),
		}
	})
}

View file

@ -0,0 +1,33 @@
//go:generate ../../../tools/readme_config_includer/generator
//go:build !linux
package conntrack
import (
_ "embed"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
//go:embed sample.conf
var sampleConfig string
// Conntrack is the non-Linux stub of the plugin; it exposes the same
// registration surface but collects nothing.
type Conntrack struct {
	Log telegraf.Logger `toml:"-"` // logger injected by Telegraf
}
// SampleConfig returns the embedded sample configuration for this plugin.
func (*Conntrack) SampleConfig() string { return sampleConfig }
// Init only warns that the plugin is inert on non-Linux platforms; it never
// fails so configurations remain portable across operating systems.
func (c *Conntrack) Init() error {
	c.Log.Warn("Current platform is not supported")
	return nil
}
// Gather is a no-op on non-Linux platforms.
func (*Conntrack) Gather(_ telegraf.Accumulator) error { return nil }
// init registers the stub so the plugin name resolves on every platform.
func init() {
	inputs.Add("conntrack", func() telegraf.Input {
		return &Conntrack{}
	})
}

View file

@ -0,0 +1,342 @@
//go:build linux
package conntrack
import (
"os"
"path"
"strconv"
"strings"
"testing"
"github.com/shirou/gopsutil/v4/net"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf/plugins/common/psutil"
"github.com/influxdata/telegraf/testutil"
)
func restoreDflts(savedFiles, savedDirs []string) {
dfltFiles = savedFiles
dfltDirs = savedDirs
}
// TestNoFilesFound verifies that Gather reports a clear error when none of
// the configured conntrack files exist.
func TestNoFilesFound(t *testing.T) {
	defer restoreDflts(dfltFiles, dfltDirs)
	dfltFiles = []string{"baz.txt"}
	dfltDirs = []string{"./foo/bar"}

	plugin := &Conntrack{}
	require.NoError(t, plugin.Init())

	var acc testutil.Accumulator
	require.EqualError(t, plugin.Gather(&acc),
		"conntrack input failed to collect metrics, make sure that the kernel module is loaded")
}
// TestDefaultsUsed verifies that a plugin with no explicit configuration
// picks up the (overridden) default dir/file lists and reports the file's
// numeric content as a float64 field.
func TestDefaultsUsed(t *testing.T) {
	defer restoreDflts(dfltFiles, dfltDirs)
	tmpdir := t.TempDir()
	tmpFile, err := os.CreateTemp(tmpdir, "ip_conntrack_count")
	require.NoError(t, err)
	// NOTE(review): redundant — t.TempDir is removed automatically at test end.
	defer os.Remove(tmpFile.Name())

	dfltDirs = []string{tmpdir}
	// NOTE(review): path.Base works here only because this file is linux-only;
	// filepath.Base would be the portable choice.
	fname := path.Base(tmpFile.Name())
	dfltFiles = []string{fname}

	count := 1234321
	require.NoError(t, os.WriteFile(tmpFile.Name(), []byte(strconv.Itoa(count)), 0640))
	c := &Conntrack{}
	require.NoError(t, c.Init())
	acc := &testutil.Accumulator{}
	require.NoError(t, c.Gather(acc))

	// The ip_-prefixed file name passes through Gather's key normalization
	// unchanged, so the field name equals the (randomized) temp file name.
	acc.AssertContainsFields(t, inputName, map[string]interface{}{
		fname: float64(count)})
}
// TestConfigsUsed verifies that nf_-prefixed files are read and that their
// field names are normalized to the ip_ prefix by Gather.
func TestConfigsUsed(t *testing.T) {
	defer restoreDflts(dfltFiles, dfltDirs)
	tmpdir := t.TempDir()
	cntFile, err := os.CreateTemp(tmpdir, "nf_conntrack_count")
	require.NoError(t, err)
	maxFile, err := os.CreateTemp(tmpdir, "nf_conntrack_max")
	require.NoError(t, err)
	// NOTE(review): redundant — t.TempDir is removed automatically at test end.
	defer os.Remove(cntFile.Name())
	defer os.Remove(maxFile.Name())

	dfltDirs = []string{tmpdir}
	cntFname := path.Base(cntFile.Name())
	maxFname := path.Base(maxFile.Name())
	dfltFiles = []string{cntFname, maxFname}

	count := 1234321
	limit := 9999999
	require.NoError(t, os.WriteFile(cntFile.Name(), []byte(strconv.Itoa(count)), 0640))
	require.NoError(t, os.WriteFile(maxFile.Name(), []byte(strconv.Itoa(limit)), 0640))
	c := &Conntrack{}
	require.NoError(t, c.Init())
	acc := &testutil.Accumulator{}
	require.NoError(t, c.Gather(acc))

	// Mirror Gather's nf_ -> ip_ key normalization for the assertions.
	fix := func(s string) string {
		return strings.Replace(s, "nf_", "ip_", 1)
	}
	acc.AssertContainsFields(t, inputName,
		map[string]interface{}{
			fix(cntFname): float64(count),
			fix(maxFname): float64(limit),
		})
}
// TestCollectStats verifies that collect=["all"] emits one tagged point
// (cpu=all) with every conntrack counter from the mocked psutil layer.
func TestCollectStats(t *testing.T) {
	var mps psutil.MockPS
	defer mps.AssertExpectations(t)
	var acc testutil.Accumulator

	// Fixture returned by the mocked NetConntrack(false) call.
	sts := net.ConntrackStat{
		Entries:       1234,
		Searched:      10,
		Found:         1,
		New:           5,
		Invalid:       43,
		Ignore:        13,
		Delete:        3,
		DeleteList:    5,
		Insert:        9,
		InsertFailed:  20,
		Drop:          49,
		EarlyDrop:     7,
		IcmpError:     21,
		ExpectNew:     12,
		ExpectCreate:  44,
		ExpectDelete:  53,
		SearchRestart: 31,
	}
	mps.On("NetConntrack", false).Return([]net.ConntrackStat{sts}, nil)

	cs := &Conntrack{
		ps:      &mps,
		Collect: []string{"all"},
	}
	require.NoError(t, cs.Init())
	err := cs.Gather(&acc)
	if err != nil && strings.Contains(err.Error(), "Is the conntrack kernel module loaded?") {
		t.Skip("Conntrack kernel module not loaded.")
	}
	require.NoError(t, err)

	expectedTags := map[string]string{
		"cpu": "all",
	}
	expectedFields := map[string]interface{}{
		"entries":        uint32(1234),
		"searched":       uint32(10),
		"found":          uint32(1),
		"new":            uint32(5),
		"invalid":        uint32(43),
		"ignore":         uint32(13),
		"delete":         uint32(3),
		"delete_list":    uint32(5),
		"insert":         uint32(9),
		"insert_failed":  uint32(20),
		"drop":           uint32(49),
		"early_drop":     uint32(7),
		"icmp_error":     uint32(21),
		"expect_new":     uint32(12),
		"expect_create":  uint32(44),
		"expect_delete":  uint32(53),
		"search_restart": uint32(31),
	}
	acc.AssertContainsFields(t, inputName, expectedFields)
	acc.AssertContainsTaggedFields(t, inputName, expectedFields, expectedTags)
	// 19 fields total: 17 counters above plus, presumably, the two procfs
	// gauges read from the default dirs — TODO confirm on the CI host.
	require.Equal(t, 19, acc.NFields())
}
// TestCollectStatsPerCpu verifies that collect=["all","percpu"] emits one
// point per CPU (cpu=cpu0, cpu=cpu1) plus an aggregated point (cpu=all),
// each carrying the mocked counter values.
func TestCollectStatsPerCpu(t *testing.T) {
	var mps psutil.MockPS
	defer mps.AssertExpectations(t)
	var acc testutil.Accumulator

	// Per-CPU fixture returned by the mocked NetConntrack(true) call.
	sts := []net.ConntrackStat{
		{
			Entries:       59,
			Searched:      10,
			Found:         1,
			New:           5,
			Invalid:       43,
			Ignore:        13,
			Delete:        3,
			DeleteList:    5,
			Insert:        9,
			InsertFailed:  20,
			Drop:          49,
			EarlyDrop:     7,
			IcmpError:     21,
			ExpectNew:     12,
			ExpectCreate:  44,
			ExpectDelete:  53,
			SearchRestart: 31,
		},
		{
			Entries:       79,
			Searched:      10,
			Found:         1,
			New:           5,
			Invalid:       43,
			Ignore:        13,
			Delete:        3,
			DeleteList:    5,
			Insert:        9,
			InsertFailed:  10,
			Drop:          49,
			EarlyDrop:     7,
			IcmpError:     21,
			ExpectNew:     12,
			ExpectCreate:  44,
			ExpectDelete:  53,
			SearchRestart: 31,
		},
	}
	mps.On("NetConntrack", true).Return(sts, nil)

	// Aggregated fixture returned by the mocked NetConntrack(false) call.
	// NOTE(review): some values are not the sum of the per-CPU entries
	// (e.g. insert_failed: 40 vs 20+10) — intentional mock data, the plugin
	// does not aggregate itself.
	allSts := []net.ConntrackStat{
		{
			Entries:       129,
			Searched:      20,
			Found:         2,
			New:           10,
			Invalid:       86,
			Ignore:        26,
			Delete:        6,
			DeleteList:    10,
			Insert:        18,
			InsertFailed:  40,
			Drop:          98,
			EarlyDrop:     17,
			IcmpError:     42,
			ExpectNew:     24,
			ExpectCreate:  88,
			ExpectDelete:  106,
			SearchRestart: 62,
		},
	}
	mps.On("NetConntrack", false).Return(allSts, nil)

	cs := &Conntrack{
		ps:      &mps,
		Collect: []string{"all", "percpu"},
	}
	require.NoError(t, cs.Init())
	err := cs.Gather(&acc)
	if err != nil && strings.Contains(err.Error(), "Is the conntrack kernel module loaded?") {
		t.Skip("Conntrack kernel module not loaded.")
	}
	require.NoError(t, err)

	// cpu0
	expectedFields := map[string]interface{}{
		"entries":        uint32(59),
		"searched":       uint32(10),
		"found":          uint32(1),
		"new":            uint32(5),
		"invalid":        uint32(43),
		"ignore":         uint32(13),
		"delete":         uint32(3),
		"delete_list":    uint32(5),
		"insert":         uint32(9),
		"insert_failed":  uint32(20),
		"drop":           uint32(49),
		"early_drop":     uint32(7),
		"icmp_error":     uint32(21),
		"expect_new":     uint32(12),
		"expect_create":  uint32(44),
		"expect_delete":  uint32(53),
		"search_restart": uint32(31),
	}
	acc.AssertContainsTaggedFields(t, inputName, expectedFields,
		map[string]string{
			"cpu": "cpu0",
		})

	// cpu1
	expectedFields1 := map[string]interface{}{
		"entries":        uint32(79),
		"searched":       uint32(10),
		"found":          uint32(1),
		"new":            uint32(5),
		"invalid":        uint32(43),
		"ignore":         uint32(13),
		"delete":         uint32(3),
		"delete_list":    uint32(5),
		"insert":         uint32(9),
		"insert_failed":  uint32(10),
		"drop":           uint32(49),
		"early_drop":     uint32(7),
		"icmp_error":     uint32(21),
		"expect_new":     uint32(12),
		"expect_create":  uint32(44),
		"expect_delete":  uint32(53),
		"search_restart": uint32(31),
	}
	acc.AssertContainsTaggedFields(t, inputName, expectedFields1,
		map[string]string{
			"cpu": "cpu1",
		})

	// aggregated (cpu=all)
	allFields := map[string]interface{}{
		"entries":        uint32(129),
		"searched":       uint32(20),
		"found":          uint32(2),
		"new":            uint32(10),
		"invalid":        uint32(86),
		"ignore":         uint32(26),
		"delete":         uint32(6),
		"delete_list":    uint32(10),
		"insert":         uint32(18),
		"insert_failed":  uint32(40),
		"drop":           uint32(98),
		"early_drop":     uint32(17),
		"icmp_error":     uint32(42),
		"expect_new":     uint32(24),
		"expect_create":  uint32(88),
		"expect_delete":  uint32(106),
		"search_restart": uint32(62),
	}
	acc.AssertContainsTaggedFields(t, inputName, allFields,
		map[string]string{
			"cpu": "all",
		})
	// 53 fields total: 3 points x 17 counters plus, presumably, the two
	// procfs gauges read from the default dirs — TODO confirm on the CI host.
	require.Equal(t, 53, acc.NFields())
}
// TestCollectPsSystemInit exercises Gather against the real system psutil
// implementation (no mock), skipping when the kernel module is absent.
func TestCollectPsSystemInit(t *testing.T) {
	plugin := &Conntrack{
		ps:      psutil.NewSystemPS(),
		Collect: []string{"all"},
	}
	require.NoError(t, plugin.Init())

	var acc testutil.Accumulator
	err := plugin.Gather(&acc)
	if err != nil && strings.Contains(err.Error(), "Is the conntrack kernel module loaded?") {
		t.Skip("Conntrack kernel module not loaded.")
	}

	// make sure Conntrack.ps gets initialized without mocking
	require.NoError(t, err)
}

View file

@ -0,0 +1,21 @@
# Collects conntrack stats from the configured directories and files.
# This plugin ONLY supports Linux
[[inputs.conntrack]]
## The following defaults would work with multiple versions of conntrack.
## Note the nf_ and ip_ filename prefixes are mutually exclusive across
## kernel versions, as are the directory locations.
## Look through /proc/net/stat/nf_conntrack for these metrics
## all - aggregated statistics
## percpu - include detailed statistics with cpu tag
collect = ["all", "percpu"]
## User-specified directories and files to look through
## Directories to search within for the conntrack files above.
## Missing directories will be ignored.
dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"]
## Superset of filenames to look for within the conntrack dirs.
## Missing files will be ignored.
files = ["ip_conntrack_count","ip_conntrack_max",
"nf_conntrack_count","nf_conntrack_max"]