
Adding upstream version 1.34.4.

Signed-off-by: Daniel Baumann <daniel@debian.org>
Daniel Baumann 2025-05-24 07:26:29 +02:00
parent e393c3af3f
commit 4978089aab
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
4963 changed files with 677545 additions and 0 deletions

View file

@ -0,0 +1,75 @@
# Control Group Input Plugin
This plugin gathers statistics per [control group (cgroup)][cgroup].
> [!NOTE]
> If you have a large number of cgroups, consider restricting `paths` to the
> cgroups you are interested in to avoid cardinality issues.
The plugin supports the _single value format_ in the form
```text
VAL\n
```
the _new line separated values format_ in the form
```text
VAL0\n
VAL1\n
```
the _space separated values format_ in the form
```text
VAL0 VAL1 ...\n
```
and the _space separated keys and value, separated by new line format_ in the
form
```text
KEY0 ... VAL0\n
KEY1 ... VAL1\n
```
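For example, a `memory.stat` file in the last format, containing

```text
cache 1739362304123123123
rss 1775325184
```

is reported as the fields `memory.stat.cache` and `memory.stat.rss`; the file
name becomes the prefix of each field name.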
⭐ Telegraf v1.0.0
🏷️ system
💻 linux
[cgroup]: https://docs.kernel.org/admin-guide/cgroup-v2.html
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used to
modify metrics, tags, and fields, or to create aliases and configure ordering,
etc. See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
## Configuration
```toml @sample.conf
# Read specific statistics per cgroup
# This plugin ONLY supports Linux
[[inputs.cgroup]]
## Directories in which to look for files; globs are supported.
## Consider restricting paths to the set of cgroups you really
## want to monitor if you have a large number of cgroups, to avoid
## any cardinality issues.
# paths = [
# "/sys/fs/cgroup/memory",
# "/sys/fs/cgroup/memory/child1",
# "/sys/fs/cgroup/memory/child2/*",
# ]
## Cgroup stat fields, as file names; globs are supported.
## These file names are appended to each path from above.
# files = ["memory.*usage*", "memory.limit_in_bytes"]
```
## Metrics
All measurements have the `path` tag.
## Example Output
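A sketch of the output, assuming `paths = ["/sys/fs/cgroup/memory"]`,
`files = ["memory.stat"]`, and the `memory.stat` contents used in this
plugin's test data:

```text
cgroup,path=/sys/fs/cgroup/memory memory.stat.cache=1739362304123123123i,memory.stat.rss=1775325184i,memory.stat.rss_huge=778043392i,memory.stat.mapped_file=421036032i,memory.stat.dirty=-307200i
```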

View file

@ -0,0 +1,33 @@
//go:generate ../../../tools/readme_config_includer/generator
package cgroup
import (
_ "embed"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
//go:embed sample.conf
var sampleConfig string
type CGroup struct {
Paths []string `toml:"paths"`
Files []string `toml:"files"`
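// logged tracks files whose parse errors have already been reported, so each malformed file is only reported once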
logged map[string]bool
}
func (*CGroup) SampleConfig() string {
return sampleConfig
}
func (cg *CGroup) Init() error {
cg.logged = make(map[string]bool)
return nil
}
func init() {
inputs.Add("cgroup", func() telegraf.Input { return &CGroup{} })
}

View file

@ -0,0 +1,305 @@
//go:build linux
package cgroup
import (
"fmt"
"math"
"os"
"path"
"path/filepath"
"regexp"
"strconv"
"strings"
"github.com/influxdata/telegraf"
)
const metricName = "cgroup"
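// Gather collects statistics from every configured cgroup directory and reports them to the accumulator.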
func (cg *CGroup) Gather(acc telegraf.Accumulator) error {
list := make(chan pathInfo)
go cg.generateDirs(list)
for dir := range list {
if dir.err != nil {
acc.AddError(dir.err)
continue
}
if err := cg.gatherDir(acc, dir.path); err != nil {
acc.AddError(err)
}
}
return nil
}
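// gatherDir reads every configured statistics file below dir, parses its content into fields and emits a single metric tagged with the directory path.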
func (cg *CGroup) gatherDir(acc telegraf.Accumulator, dir string) error {
fields := make(map[string]interface{})
list := make(chan pathInfo)
go cg.generateFiles(dir, list)
for file := range list {
if file.err != nil {
return file.err
}
raw, err := os.ReadFile(file.path)
if err != nil {
return err
}
if len(raw) == 0 {
continue
}
fd := fileData{data: raw, path: file.path}
if err := fd.parse(fields); err != nil {
if !cg.logged[file.path] {
acc.AddError(err)
}
cg.logged[file.path] = true
continue
}
}
tags := map[string]string{"path": dir}
acc.AddFields(metricName, fields, tags)
return nil
}
// ======================================================================
type pathInfo struct {
path string
err error
}
func isDir(pathToCheck string) (bool, error) {
result, err := os.Stat(pathToCheck)
if err != nil {
return false, err
}
return result.IsDir(), nil
}
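// generateDirs expands the configured path globs and sends every matching directory on the list channel, closing it when done.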
func (cg *CGroup) generateDirs(list chan<- pathInfo) {
defer close(list)
for _, dir := range cg.Paths {
// getting all dirs that match the pattern 'dir'
items, err := filepath.Glob(dir)
if err != nil {
list <- pathInfo{err: err}
return
}
for _, item := range items {
ok, err := isDir(item)
if err != nil {
list <- pathInfo{err: err}
return
}
// supply only dirs
if ok {
list <- pathInfo{path: item}
}
}
}
}
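// generateFiles expands the configured file globs below dir and sends every matching file (directories are skipped) on the list channel, closing it when done.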
func (cg *CGroup) generateFiles(dir string, list chan<- pathInfo) {
dir = strings.Replace(dir, "\\", "\\\\", -1)
defer close(list)
for _, file := range cg.Files {
// getting all file paths that match the pattern 'dir + file'
// path.Base makes sure that the file variable does not contain any path components
items, err := filepath.Glob(path.Join(dir, path.Base(file)))
if err != nil {
list <- pathInfo{err: err}
return
}
for _, item := range items {
ok, err := isDir(item)
if err != nil {
list <- pathInfo{err: err}
return
}
// supply only files, not dirs
if !ok {
list <- pathInfo{path: item}
}
}
}
}
// ======================================================================
type fileData struct {
data []byte
path string
}
func (fd *fileData) format() (*fileFormat, error) {
for _, ff := range fileFormats {
ok, err := ff.match(fd.data)
if err != nil {
return nil, err
}
if ok {
return &ff, nil
}
}
return nil, fmt.Errorf("%v: unknown file format", fd.path)
}
func (fd *fileData) parse(fields map[string]interface{}) error {
format, err := fd.format()
if err != nil {
return err
}
format.parser(filepath.Base(fd.path), fields, fd.data)
return nil
}
// ======================================================================
type fileFormat struct {
name string
pattern string
parser func(measurement string, fields map[string]interface{}, b []byte)
}
const keyPattern = "[[:alnum:]:_.]+"
const valuePattern = "(?:max|[\\d-\\.]+)"
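// fileFormats lists the supported file layouts; format() returns the first entry whose pattern matches the file content.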
var fileFormats = [...]fileFormat{
// VAL\n
{
name: "Single value",
pattern: "^" + valuePattern + "\n$",
parser: func(measurement string, fields map[string]interface{}, b []byte) {
re := regexp.MustCompile("^(" + valuePattern + ")\n$")
matches := re.FindAllStringSubmatch(string(b), -1)
fields[measurement] = numberOrString(matches[0][1])
},
},
// VAL0\n
// VAL1\n
// ...
{
name: "New line separated values",
pattern: "^(" + valuePattern + "\n){2,}$",
parser: func(measurement string, fields map[string]interface{}, b []byte) {
re := regexp.MustCompile("(" + valuePattern + ")\n")
matches := re.FindAllStringSubmatch(string(b), -1)
for i, v := range matches {
fields[measurement+"."+strconv.Itoa(i)] = numberOrString(v[1])
}
},
},
// VAL0 VAL1 ...\n
{
name: "Space separated values",
pattern: "^(" + valuePattern + " ?)+\n$",
parser: func(measurement string, fields map[string]interface{}, b []byte) {
re := regexp.MustCompile("(" + valuePattern + ")")
matches := re.FindAllStringSubmatch(string(b), -1)
for i, v := range matches {
fields[measurement+"."+strconv.Itoa(i)] = numberOrString(v[1])
}
},
},
// KEY0 ... VAL0\n
// KEY1 ... VAL1\n
// ...
{
name: "Space separated keys and value, separated by new line",
pattern: "^((" + keyPattern + " )+" + valuePattern + "\n)+$",
parser: func(measurement string, fields map[string]interface{}, b []byte) {
re := regexp.MustCompile("((?:" + keyPattern + " ?)+) (" + valuePattern + ")\n")
matches := re.FindAllStringSubmatch(string(b), -1)
for _, v := range matches {
k := strings.ReplaceAll(v[1], " ", ".")
fields[measurement+"."+k] = numberOrString(v[2])
}
},
},
// NAME0 KEY0=VAL0 ...\n
// NAME1 KEY1=VAL1 ...\n
// ...
{
name: "Equal sign separated key-value pairs, multiple lines with name",
pattern: fmt.Sprintf("^(%s( %s=%s)+\n)+$", keyPattern, keyPattern, valuePattern),
parser: func(measurement string, fields map[string]interface{}, b []byte) {
lines := strings.Split(string(b), "\n")
for _, line := range lines {
f := strings.Fields(line)
if len(f) == 0 {
continue
}
name := f[0]
for _, field := range f[1:] {
k, v, found := strings.Cut(field, "=")
if found {
fields[strings.Join([]string{measurement, name, k}, ".")] = numberOrString(v)
}
}
}
},
},
// KEY0=VAL0 KEY1=VAL1 ...\n
{
name: "Equal sign separated key-value pairs on a single line",
pattern: fmt.Sprintf("^(%s=%s ?)+\n$", keyPattern, valuePattern),
parser: func(measurement string, fields map[string]interface{}, b []byte) {
f := strings.Fields(string(b))
if len(f) == 0 {
return
}
for _, field := range f {
k, v, found := strings.Cut(field, "=")
if found {
fields[strings.Join([]string{measurement, k}, ".")] = numberOrString(v)
}
}
},
},
}
func numberOrString(s string) interface{} {
i, err := strconv.ParseInt(s, 10, 64)
if err == nil {
return i
}
if s == "max" {
return int64(math.MaxInt64)
}
// Care should be taken to always interpret each field as the same type on every cycle.
// *.pressure files follow the PSI format and contain numbers with fractional parts
// that always have a decimal separator, even when the fractional part is 0 (e.g., "0.00"),
// thus they will always be interpreted as floats.
// https://www.kernel.org/doc/Documentation/accounting/psi.txt
f, err := strconv.ParseFloat(s, 64)
if err == nil {
return f
}
return s
}
func (f fileFormat) match(b []byte) (bool, error) {
return regexp.Match(f.pattern, b)
}

View file

@ -0,0 +1,11 @@
//go:build !linux
package cgroup
import (
"github.com/influxdata/telegraf"
)
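// Gather is a no-op on platforms other than Linux.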
func (*CGroup) Gather(_ telegraf.Accumulator) error {
return nil
}

View file

@ -0,0 +1,445 @@
//go:build linux
package cgroup
import (
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/testutil"
)
func TestCgroupStatistics_1(t *testing.T) {
var acc testutil.Accumulator
var cg = &CGroup{
Paths: []string{"testdata/memory"},
Files: []string{
"memory.empty",
"memory.max_usage_in_bytes",
"memory.limit_in_bytes",
"memory.stat",
"memory.use_hierarchy",
"notify_on_release",
},
}
require.NoError(t, acc.GatherError(cg.Gather))
expected := []telegraf.Metric{
metric.New(
"cgroup",
map[string]string{
"path": "testdata/memory",
},
map[string]interface{}{
"memory.stat.cache": int64(1739362304123123123),
"memory.stat.rss": int64(1775325184),
"memory.stat.rss_huge": int64(778043392),
"memory.stat.mapped_file": int64(421036032),
"memory.stat.dirty": int64(-307200),
"memory.max_usage_in_bytes.0": int64(0),
"memory.max_usage_in_bytes.1": int64(-1),
"memory.max_usage_in_bytes.2": int64(2),
"memory.limit_in_bytes": int64(223372036854771712),
"memory.use_hierarchy": "12-781",
"notify_on_release": int64(0),
},
time.Unix(0, 0),
),
}
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
}
func TestCgroupStatistics_2(t *testing.T) {
var acc testutil.Accumulator
var cg = &CGroup{
Paths: []string{"testdata/cpu"},
Files: []string{
"cpuacct.usage_percpu",
"cpu.stat",
},
}
require.NoError(t, acc.GatherError(cg.Gather))
expected := []telegraf.Metric{
metric.New(
"cgroup",
map[string]string{
"path": "testdata/cpu",
},
map[string]interface{}{
"cpu.stat.core_sched.force_idle_usec": int64(0),
"cpu.stat.system_usec": int64(103537582650),
"cpu.stat.usage_usec": int64(614953149468),
"cpu.stat.user_usec": int64(511415566817),
"cpuacct.usage_percpu.0": int64(-1452543795404),
"cpuacct.usage_percpu.1": int64(1376681271659),
"cpuacct.usage_percpu.2": int64(1450950799997),
"cpuacct.usage_percpu.3": int64(-1473113374257),
},
time.Unix(0, 0),
),
}
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
}
func TestCgroupStatistics_3(t *testing.T) {
var acc testutil.Accumulator
var cg = &CGroup{
Paths: []string{"testdata/memory/*"},
Files: []string{"memory.limit_in_bytes"},
}
fields := map[string]interface{}{
"memory.limit_in_bytes": int64(223372036854771712),
}
require.NoError(t, acc.GatherError(cg.Gather))
expected := []telegraf.Metric{
metric.New(
"cgroup",
map[string]string{
"path": "testdata/memory/group_1",
},
fields,
time.Unix(0, 0),
),
metric.New(
"cgroup",
map[string]string{
"path": "testdata/memory/group_2",
},
fields,
time.Unix(0, 0),
),
}
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
}
func TestCgroupStatistics_4(t *testing.T) {
var acc testutil.Accumulator
var cg = &CGroup{
Paths: []string{"testdata/memory/*/*", "testdata/memory/group_2"},
Files: []string{"memory.limit_in_bytes"},
}
fields := map[string]interface{}{
"memory.limit_in_bytes": int64(223372036854771712),
}
require.NoError(t, acc.GatherError(cg.Gather))
expected := []telegraf.Metric{
metric.New(
"cgroup",
map[string]string{
"path": "testdata/memory/group_1/group_1_1",
},
fields,
time.Unix(0, 0),
),
metric.New(
"cgroup",
map[string]string{
"path": "testdata/memory/group_1/group_1_2",
},
fields,
time.Unix(0, 0),
),
metric.New(
"cgroup",
map[string]string{
"path": "testdata/memory/group_2/group_1_1",
},
fields,
time.Unix(0, 0),
),
metric.New(
"cgroup",
map[string]string{
"path": "testdata/memory/group_2",
},
map[string]interface{}{
"memory.limit_in_bytes": int64(223372036854771712),
},
time.Unix(0, 0),
),
}
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
}
func TestCgroupStatistics_5(t *testing.T) {
var acc testutil.Accumulator
var cg = &CGroup{
Paths: []string{"testdata/memory/*/group_1_1"},
Files: []string{"memory.limit_in_bytes"},
}
require.NoError(t, acc.GatherError(cg.Gather))
expected := []telegraf.Metric{
metric.New(
"cgroup",
map[string]string{
"path": "testdata/memory/group_1/group_1_1",
},
map[string]interface{}{
"memory.limit_in_bytes": int64(223372036854771712),
},
time.Unix(0, 0),
),
metric.New(
"cgroup",
map[string]string{
"path": "testdata/memory/group_2/group_1_1",
},
map[string]interface{}{
"memory.limit_in_bytes": int64(223372036854771712),
},
time.Unix(0, 0),
),
}
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
}
func TestCgroupStatistics_6(t *testing.T) {
var acc testutil.Accumulator
var cg = &CGroup{
Paths: []string{"testdata/memory"},
Files: []string{"memory.us*", "*/memory.kmem.*"},
}
require.NoError(t, acc.GatherError(cg.Gather))
expected := []telegraf.Metric{
metric.New(
"cgroup",
map[string]string{
"path": "testdata/memory",
},
map[string]interface{}{
"memory.usage_in_bytes": int64(3513667584),
"memory.use_hierarchy": "12-781",
"memory.kmem.limit_in_bytes": int64(9223372036854771712),
},
time.Unix(0, 0),
),
}
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
}
func TestCgroupStatistics_7(t *testing.T) {
var acc testutil.Accumulator
var cg = &CGroup{
Paths: []string{"testdata/blkio"},
Files: []string{"blkio.throttle.io_serviced"},
}
require.NoError(t, acc.GatherError(cg.Gather))
expected := []telegraf.Metric{
metric.New(
"cgroup",
map[string]string{
"path": "testdata/blkio",
},
map[string]interface{}{
"blkio.throttle.io_serviced.11:0.Read": int64(0),
"blkio.throttle.io_serviced.11:0.Write": int64(0),
"blkio.throttle.io_serviced.11:0.Sync": int64(0),
"blkio.throttle.io_serviced.11:0.Async": int64(0),
"blkio.throttle.io_serviced.11:0.Total": int64(0),
"blkio.throttle.io_serviced.8:0.Read": int64(49134),
"blkio.throttle.io_serviced.8:0.Write": int64(216703),
"blkio.throttle.io_serviced.8:0.Sync": int64(177906),
"blkio.throttle.io_serviced.8:0.Async": int64(87931),
"blkio.throttle.io_serviced.8:0.Total": int64(265837),
"blkio.throttle.io_serviced.7:7.Read": int64(0),
"blkio.throttle.io_serviced.7:7.Write": int64(0),
"blkio.throttle.io_serviced.7:7.Sync": int64(0),
"blkio.throttle.io_serviced.7:7.Async": int64(0),
"blkio.throttle.io_serviced.7:7.Total": int64(0),
"blkio.throttle.io_serviced.7:6.Read": int64(0),
"blkio.throttle.io_serviced.7:6.Write": int64(0),
"blkio.throttle.io_serviced.7:6.Sync": int64(0),
"blkio.throttle.io_serviced.7:6.Async": int64(0),
"blkio.throttle.io_serviced.7:6.Total": int64(0),
"blkio.throttle.io_serviced.7:5.Read": int64(0),
"blkio.throttle.io_serviced.7:5.Write": int64(0),
"blkio.throttle.io_serviced.7:5.Sync": int64(0),
"blkio.throttle.io_serviced.7:5.Async": int64(0),
"blkio.throttle.io_serviced.7:5.Total": int64(0),
"blkio.throttle.io_serviced.7:4.Read": int64(0),
"blkio.throttle.io_serviced.7:4.Write": int64(0),
"blkio.throttle.io_serviced.7:4.Sync": int64(0),
"blkio.throttle.io_serviced.7:4.Async": int64(0),
"blkio.throttle.io_serviced.7:4.Total": int64(0),
"blkio.throttle.io_serviced.7:3.Read": int64(0),
"blkio.throttle.io_serviced.7:3.Write": int64(0),
"blkio.throttle.io_serviced.7:3.Sync": int64(0),
"blkio.throttle.io_serviced.7:3.Async": int64(0),
"blkio.throttle.io_serviced.7:3.Total": int64(0),
"blkio.throttle.io_serviced.7:2.Read": int64(0),
"blkio.throttle.io_serviced.7:2.Write": int64(0),
"blkio.throttle.io_serviced.7:2.Sync": int64(0),
"blkio.throttle.io_serviced.7:2.Async": int64(0),
"blkio.throttle.io_serviced.7:2.Total": int64(0),
"blkio.throttle.io_serviced.7:1.Read": int64(0),
"blkio.throttle.io_serviced.7:1.Write": int64(0),
"blkio.throttle.io_serviced.7:1.Sync": int64(0),
"blkio.throttle.io_serviced.7:1.Async": int64(0),
"blkio.throttle.io_serviced.7:1.Total": int64(0),
"blkio.throttle.io_serviced.7:0.Read": int64(0),
"blkio.throttle.io_serviced.7:0.Write": int64(0),
"blkio.throttle.io_serviced.7:0.Sync": int64(0),
"blkio.throttle.io_serviced.7:0.Async": int64(0),
"blkio.throttle.io_serviced.7:0.Total": int64(0),
"blkio.throttle.io_serviced.1:15.Read": int64(3),
"blkio.throttle.io_serviced.1:15.Write": int64(0),
"blkio.throttle.io_serviced.1:15.Sync": int64(0),
"blkio.throttle.io_serviced.1:15.Async": int64(3),
"blkio.throttle.io_serviced.1:15.Total": int64(3),
"blkio.throttle.io_serviced.1:14.Read": int64(3),
"blkio.throttle.io_serviced.1:14.Write": int64(0),
"blkio.throttle.io_serviced.1:14.Sync": int64(0),
"blkio.throttle.io_serviced.1:14.Async": int64(3),
"blkio.throttle.io_serviced.1:14.Total": int64(3),
"blkio.throttle.io_serviced.1:13.Read": int64(3),
"blkio.throttle.io_serviced.1:13.Write": int64(0),
"blkio.throttle.io_serviced.1:13.Sync": int64(0),
"blkio.throttle.io_serviced.1:13.Async": int64(3),
"blkio.throttle.io_serviced.1:13.Total": int64(3),
"blkio.throttle.io_serviced.1:12.Read": int64(3),
"blkio.throttle.io_serviced.1:12.Write": int64(0),
"blkio.throttle.io_serviced.1:12.Sync": int64(0),
"blkio.throttle.io_serviced.1:12.Async": int64(3),
"blkio.throttle.io_serviced.1:12.Total": int64(3),
"blkio.throttle.io_serviced.1:11.Read": int64(3),
"blkio.throttle.io_serviced.1:11.Write": int64(0),
"blkio.throttle.io_serviced.1:11.Sync": int64(0),
"blkio.throttle.io_serviced.1:11.Async": int64(3),
"blkio.throttle.io_serviced.1:11.Total": int64(3),
"blkio.throttle.io_serviced.1:10.Read": int64(3),
"blkio.throttle.io_serviced.1:10.Write": int64(0),
"blkio.throttle.io_serviced.1:10.Sync": int64(0),
"blkio.throttle.io_serviced.1:10.Async": int64(3),
"blkio.throttle.io_serviced.1:10.Total": int64(3),
"blkio.throttle.io_serviced.1:9.Read": int64(3),
"blkio.throttle.io_serviced.1:9.Write": int64(0),
"blkio.throttle.io_serviced.1:9.Sync": int64(0),
"blkio.throttle.io_serviced.1:9.Async": int64(3),
"blkio.throttle.io_serviced.1:9.Total": int64(3),
"blkio.throttle.io_serviced.1:8.Read": int64(3),
"blkio.throttle.io_serviced.1:8.Write": int64(0),
"blkio.throttle.io_serviced.1:8.Sync": int64(0),
"blkio.throttle.io_serviced.1:8.Async": int64(3),
"blkio.throttle.io_serviced.1:8.Total": int64(3),
"blkio.throttle.io_serviced.1:7.Read": int64(3),
"blkio.throttle.io_serviced.1:7.Write": int64(0),
"blkio.throttle.io_serviced.1:7.Sync": int64(0),
"blkio.throttle.io_serviced.1:7.Async": int64(3),
"blkio.throttle.io_serviced.1:7.Total": int64(3),
"blkio.throttle.io_serviced.1:6.Read": int64(3),
"blkio.throttle.io_serviced.1:6.Write": int64(0),
"blkio.throttle.io_serviced.1:6.Sync": int64(0),
"blkio.throttle.io_serviced.1:6.Async": int64(3),
"blkio.throttle.io_serviced.1:6.Total": int64(3),
"blkio.throttle.io_serviced.1:5.Read": int64(3),
"blkio.throttle.io_serviced.1:5.Write": int64(0),
"blkio.throttle.io_serviced.1:5.Sync": int64(0),
"blkio.throttle.io_serviced.1:5.Async": int64(3),
"blkio.throttle.io_serviced.1:5.Total": int64(3),
"blkio.throttle.io_serviced.1:4.Read": int64(3),
"blkio.throttle.io_serviced.1:4.Write": int64(0),
"blkio.throttle.io_serviced.1:4.Sync": int64(0),
"blkio.throttle.io_serviced.1:4.Async": int64(3),
"blkio.throttle.io_serviced.1:4.Total": int64(3),
"blkio.throttle.io_serviced.1:3.Read": int64(3),
"blkio.throttle.io_serviced.1:3.Write": int64(0),
"blkio.throttle.io_serviced.1:3.Sync": int64(0),
"blkio.throttle.io_serviced.1:3.Async": int64(3),
"blkio.throttle.io_serviced.1:3.Total": int64(3),
"blkio.throttle.io_serviced.1:2.Read": int64(3),
"blkio.throttle.io_serviced.1:2.Write": int64(0),
"blkio.throttle.io_serviced.1:2.Sync": int64(0),
"blkio.throttle.io_serviced.1:2.Async": int64(3),
"blkio.throttle.io_serviced.1:2.Total": int64(3),
"blkio.throttle.io_serviced.1:1.Read": int64(3),
"blkio.throttle.io_serviced.1:1.Write": int64(0),
"blkio.throttle.io_serviced.1:1.Sync": int64(0),
"blkio.throttle.io_serviced.1:1.Async": int64(3),
"blkio.throttle.io_serviced.1:1.Total": int64(3),
"blkio.throttle.io_serviced.1:0.Read": int64(3),
"blkio.throttle.io_serviced.1:0.Write": int64(0),
"blkio.throttle.io_serviced.1:0.Sync": int64(0),
"blkio.throttle.io_serviced.1:0.Async": int64(3),
"blkio.throttle.io_serviced.1:0.Total": int64(3),
"blkio.throttle.io_serviced.Total": int64(265885),
},
time.Unix(0, 0),
),
}
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
}
func TestCgroupStatistics_8(t *testing.T) {
var acc testutil.Accumulator
var cg = &CGroup{
Paths: []string{"testdata/broken"},
Files: []string{"malformed.file", "memory.limit_in_bytes"},
logged: make(map[string]bool),
}
require.Error(t, acc.GatherError(cg.Gather))
require.Len(t, cg.logged, 1)
expected := []telegraf.Metric{
metric.New(
"cgroup",
map[string]string{"path": "testdata/broken"},
map[string]interface{}{"memory.limit_in_bytes": int64(1)},
time.Unix(0, 0),
),
}
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
// clear errors so we can check for new errors in next round
acc.Errors = nil
require.NoError(t, acc.GatherError(cg.Gather))
require.Len(t, cg.logged, 1)
}
func TestCgroupEscapeDir(t *testing.T) {
var acc testutil.Accumulator
var cg = &CGroup{
Paths: []string{"testdata/backslash/machine-qemu\x2d1\x2d*"},
Files: []string{"cpu.stat"},
logged: make(map[string]bool),
}
expected := []telegraf.Metric{
metric.New(
"cgroup",
map[string]string{"path": `testdata/backslash/machine-qemu-1-ubuntu`},
map[string]interface{}{
"cpu.stat.core_sched.force_idle_usec": int64(0),
"cpu.stat.system_usec": int64(103537582650),
"cpu.stat.usage_usec": int64(614953149468),
"cpu.stat.user_usec": int64(511415566817),
},
time.Unix(0, 0),
),
}
require.NoError(t, acc.GatherError(cg.Gather))
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
}

View file

@ -0,0 +1,328 @@
//go:build linux
package cgroup
import (
"math"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/testutil"
)
func TestCgroupV2Cpu(t *testing.T) {
var acc testutil.Accumulator
var cg = &CGroup{
Paths: []string{"testdata/v2"},
Files: []string{"cpu.*"},
logged: make(map[string]bool),
}
expected := []telegraf.Metric{
metric.New(
"cgroup",
map[string]string{"path": `testdata/v2`},
map[string]interface{}{
"cpu.idle": int64(0),
"cpu.max.0": int64(4800000),
"cpu.max.1": int64(100000),
"cpu.max.burst": int64(0),
"cpu.pressure.full.avg10": float64(0),
"cpu.pressure.full.avg300": float64(0.05),
"cpu.pressure.full.avg60": float64(0.08),
"cpu.pressure.full.total": int64(277111656),
"cpu.pressure.some.avg10": float64(0),
"cpu.pressure.some.avg300": float64(0.06),
"cpu.pressure.some.avg60": float64(0.08),
"cpu.pressure.some.total": int64(293391454),
"cpu.stat.burst_usec": int64(0),
"cpu.stat.core_sched.force_idle_usec": int64(0),
"cpu.stat.nr_bursts": int64(0),
"cpu.stat.nr_periods": int64(3936904),
"cpu.stat.nr_throttled": int64(6004),
"cpu.stat.system_usec": int64(37345608977),
"cpu.stat.throttled_usec": int64(19175137007),
"cpu.stat.usage_usec": int64(98701325189),
"cpu.stat.user_usec": int64(61355716211),
"cpu.weight": int64(79),
"cpu.weight.nice": int64(1),
},
time.Unix(0, 0),
),
}
require.NoError(t, acc.GatherError(cg.Gather))
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
}
func TestCgroupV2Memory(t *testing.T) {
var acc testutil.Accumulator
var cg = &CGroup{
Paths: []string{"testdata/v2"},
Files: []string{"memory.*"},
logged: make(map[string]bool),
}
expected := []telegraf.Metric{
metric.New(
"cgroup",
map[string]string{"path": `testdata/v2`},
map[string]interface{}{
"memory.current": int64(13071106048),
"memory.events.high": int64(0),
"memory.events.local.high": int64(0),
"memory.events.local.low": int64(0),
"memory.events.local.max": int64(0),
"memory.events.local.oom": int64(0),
"memory.events.local.oom_group_kill": int64(0),
"memory.events.local.oom_kill": int64(0),
"memory.events.low": int64(0),
"memory.events.max": int64(0),
"memory.events.oom": int64(0),
"memory.events.oom_group_kill": int64(0),
"memory.events.oom_kill": int64(0),
"memory.high": int64(math.MaxInt64),
"memory.low": int64(0),
"memory.max": int64(103079215104),
"memory.min": int64(0),
"memory.numa_stat.active_anon.N0": int64(81920),
"memory.numa_stat.active_anon.N1": int64(98304),
"memory.numa_stat.active_file.N0": int64(2946760704),
"memory.numa_stat.active_file.N1": int64(2650640384),
"memory.numa_stat.anon.N0": int64(1330585600),
"memory.numa_stat.anon.N1": int64(1141161984),
"memory.numa_stat.anon_thp.N0": int64(0),
"memory.numa_stat.anon_thp.N1": int64(2097152),
"memory.numa_stat.file.N0": int64(4531773440),
"memory.numa_stat.file.N1": int64(4001075200),
"memory.numa_stat.file_dirty.N0": int64(258048),
"memory.numa_stat.file_dirty.N1": int64(45056),
"memory.numa_stat.file_mapped.N0": int64(10272768),
"memory.numa_stat.file_mapped.N1": int64(3940352),
"memory.numa_stat.file_thp.N0": int64(0),
"memory.numa_stat.file_thp.N1": int64(0),
"memory.numa_stat.file_writeback.N0": int64(0),
"memory.numa_stat.file_writeback.N1": int64(0),
"memory.numa_stat.inactive_anon.N0": int64(1330479104),
"memory.numa_stat.inactive_anon.N1": int64(1141067776),
"memory.numa_stat.inactive_file.N0": int64(1584979968),
"memory.numa_stat.inactive_file.N1": int64(1350430720),
"memory.numa_stat.kernel_stack.N0": int64(4161536),
"memory.numa_stat.kernel_stack.N1": int64(5537792),
"memory.numa_stat.pagetables.N0": int64(7839744),
"memory.numa_stat.pagetables.N1": int64(8462336),
"memory.numa_stat.sec_pagetables.N0": int64(0),
"memory.numa_stat.sec_pagetables.N1": int64(0),
"memory.numa_stat.shmem.N0": int64(0),
"memory.numa_stat.shmem.N1": int64(4096),
"memory.numa_stat.shmem_thp.N0": int64(0),
"memory.numa_stat.shmem_thp.N1": int64(0),
"memory.numa_stat.slab_reclaimable.N0": int64(950447920),
"memory.numa_stat.slab_reclaimable.N1": int64(1081869088),
"memory.numa_stat.slab_unreclaimable.N0": int64(2654816),
"memory.numa_stat.slab_unreclaimable.N1": int64(2661512),
"memory.numa_stat.swapcached.N0": int64(0),
"memory.numa_stat.swapcached.N1": int64(0),
"memory.numa_stat.unevictable.N0": int64(0),
"memory.numa_stat.unevictable.N1": int64(0),
"memory.numa_stat.workingset_activate_anon.N0": int64(0),
"memory.numa_stat.workingset_activate_anon.N1": int64(0),
"memory.numa_stat.workingset_activate_file.N0": int64(40145),
"memory.numa_stat.workingset_activate_file.N1": int64(65541),
"memory.numa_stat.workingset_nodereclaim.N0": int64(0),
"memory.numa_stat.workingset_nodereclaim.N1": int64(0),
"memory.numa_stat.workingset_refault_anon.N0": int64(0),
"memory.numa_stat.workingset_refault_anon.N1": int64(0),
"memory.numa_stat.workingset_refault_file.N0": int64(346752),
"memory.numa_stat.workingset_refault_file.N1": int64(282604),
"memory.numa_stat.workingset_restore_anon.N0": int64(0),
"memory.numa_stat.workingset_restore_anon.N1": int64(0),
"memory.numa_stat.workingset_restore_file.N0": int64(19386),
"memory.numa_stat.workingset_restore_file.N1": int64(10010),
"memory.oom.group": int64(1),
"memory.peak": int64(87302021120),
"memory.pressure.full.avg10": float64(0),
"memory.pressure.full.avg300": float64(0),
"memory.pressure.full.avg60": float64(0),
"memory.pressure.full.total": int64(250662),
"memory.pressure.some.avg10": float64(0),
"memory.pressure.some.avg300": float64(0),
"memory.pressure.some.avg60": float64(0),
"memory.pressure.some.total": int64(250773),
"memory.stat.active_anon": int64(180224),
"memory.stat.active_file": int64(5597401088),
"memory.stat.anon": int64(2471755776),
"memory.stat.anon_thp": int64(2097152),
"memory.stat.file": int64(8532865024),
"memory.stat.file_dirty": int64(319488),
"memory.stat.file_mapped": int64(14213120),
"memory.stat.file_thp": int64(0),
"memory.stat.file_writeback": int64(0),
"memory.stat.inactive_anon": int64(2471559168),
"memory.stat.inactive_file": int64(2935459840),
"memory.stat.kernel": int64(2065149952),
"memory.stat.kernel_stack": int64(9699328),
"memory.stat.pagetables": int64(16302080),
"memory.stat.percpu": int64(3528),
"memory.stat.pgactivate": int64(13516655),
"memory.stat.pgdeactivate": int64(9151751),
"memory.stat.pgfault": int64(1973187551),
"memory.stat.pglazyfree": int64(5549),
"memory.stat.pglazyfreed": int64(1),
"memory.stat.pgmajfault": int64(8497),
"memory.stat.pgrefill": int64(9153617),
"memory.stat.pgscan": int64(12149209),
"memory.stat.pgscan_direct": int64(4436521),
"memory.stat.pgscan_kswapd": int64(7712688),
"memory.stat.pgsteal": int64(12139915),
"memory.stat.pgsteal_direct": int64(4429690),
"memory.stat.pgsteal_kswapd": int64(7710225),
"memory.stat.sec_pagetables": int64(0),
"memory.stat.shmem": int64(4096),
"memory.stat.shmem_thp": int64(0),
"memory.stat.slab": int64(2037641160),
"memory.stat.slab_reclaimable": int64(2032322192),
"memory.stat.slab_unreclaimable": int64(5318968),
"memory.stat.sock": int64(0),
"memory.stat.swapcached": int64(0),
"memory.stat.thp_collapse_alloc": int64(3),
"memory.stat.thp_fault_alloc": int64(13),
"memory.stat.unevictable": int64(0),
"memory.stat.vmalloc": int64(0),
"memory.stat.workingset_activate_anon": int64(0),
"memory.stat.workingset_activate_file": int64(105686),
"memory.stat.workingset_nodereclaim": int64(0),
"memory.stat.workingset_refault_anon": int64(0),
"memory.stat.workingset_refault_file": int64(629356),
"memory.stat.workingset_restore_anon": int64(0),
"memory.stat.workingset_restore_file": int64(29396),
"memory.swap.current": int64(0),
"memory.swap.events.fail": int64(0),
"memory.swap.events.high": int64(0),
"memory.swap.events.max": int64(0),
"memory.swap.high": int64(math.MaxInt64),
"memory.swap.max": int64(0),
},
time.Unix(0, 0),
),
}
require.NoError(t, acc.GatherError(cg.Gather))
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
}
func TestCgroupV2Io(t *testing.T) {
var acc testutil.Accumulator
var cg = &CGroup{
Paths: []string{"testdata/v2"},
Files: []string{"io.*"},
logged: make(map[string]bool),
}
expected := []telegraf.Metric{
metric.New(
"cgroup",
map[string]string{"path": `testdata/v2`},
map[string]interface{}{
"io.bfq.weight.default": int64(100),
"io.pressure.full.avg10": float64(0),
"io.pressure.full.avg300": float64(0),
"io.pressure.full.avg60": float64(0),
"io.pressure.full.total": 184607952,
"io.pressure.some.avg10": float64(0),
"io.pressure.some.avg300": float64(0),
"io.pressure.some.avg60": float64(0),
"io.pressure.some.total": 185162400,
"io.stat.259:8.dbytes": int64(0),
"io.stat.259:8.dios": int64(0),
"io.stat.259:8.rbytes": int64(74526720),
"io.stat.259:8.rios": int64(2936),
"io.stat.259:8.wbytes": int64(3789381632),
"io.stat.259:8.wios": int64(181928),
},
time.Unix(0, 0),
),
}
require.NoError(t, acc.GatherError(cg.Gather))
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
}
func TestCgroupV2Hugetlb(t *testing.T) {
var acc testutil.Accumulator
var cg = &CGroup{
Paths: []string{"testdata/v2"},
Files: []string{"hugetlb.*"},
logged: make(map[string]bool),
}
expected := []telegraf.Metric{
metric.New(
"cgroup",
map[string]string{"path": `testdata/v2`},
map[string]interface{}{
"hugetlb.1GB.current": int64(0),
"hugetlb.1GB.events.0": int64(math.MaxInt64),
"hugetlb.1GB.events.1": int64(0),
"hugetlb.1GB.events.local.0": int64(math.MaxInt64),
"hugetlb.1GB.events.local.1": int64(0),
"hugetlb.1GB.max": int64(math.MaxInt64),
"hugetlb.1GB.numa_stat.N0": int64(0),
"hugetlb.1GB.numa_stat.N1": int64(0),
"hugetlb.1GB.numa_stat.total": int64(0),
"hugetlb.1GB.rsvd.current": int64(0),
"hugetlb.1GB.rsvd.max": int64(math.MaxInt64),
"hugetlb.2MB.current": int64(0),
"hugetlb.2MB.events.0": int64(math.MaxInt64),
"hugetlb.2MB.events.1": int64(0),
"hugetlb.2MB.events.local.0": int64(math.MaxInt64),
"hugetlb.2MB.events.local.1": int64(0),
"hugetlb.2MB.max": int64(math.MaxInt64),
"hugetlb.2MB.numa_stat.N0": int64(0),
"hugetlb.2MB.numa_stat.N1": int64(0),
"hugetlb.2MB.numa_stat.total": int64(0),
"hugetlb.2MB.rsvd.current": int64(0),
"hugetlb.2MB.rsvd.max": int64(math.MaxInt64),
},
time.Unix(0, 0),
),
}
require.NoError(t, acc.GatherError(cg.Gather))
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
}
func TestCgroupV2Pids(t *testing.T) {
var acc testutil.Accumulator
var cg = &CGroup{
Paths: []string{"testdata/v2"},
Files: []string{"pids.*"},
logged: make(map[string]bool),
}
expected := []telegraf.Metric{
metric.New(
"cgroup",
map[string]string{"path": `testdata/v2`},
map[string]interface{}{
"pids.current": int64(592),
"pids.events.0": int64(math.MaxInt64),
"pids.events.1": int64(0),
"pids.max": int64(629145),
"pids.peak": int64(2438),
},
time.Unix(0, 0),
),
}
require.NoError(t, acc.GatherError(cg.Gather))
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
}

View file

@ -0,0 +1,15 @@
# Read specific statistics per cgroup
# This plugin ONLY supports Linux
[[inputs.cgroup]]
## Directories in which to look for files; globs are supported.
## Consider restricting paths to the set of cgroups you really
## want to monitor if you have a large number of cgroups, to avoid
## any cardinality issues.
# paths = [
# "/sys/fs/cgroup/memory",
# "/sys/fs/cgroup/memory/child1",
# "/sys/fs/cgroup/memory/child2/*",
# ]
## Cgroup stat fields, as file names; globs are supported.
## These file names are appended to each path from above.
# files = ["memory.*usage*", "memory.limit_in_bytes"]

View file

@ -0,0 +1,4 @@
usage_usec 614953149468
user_usec 511415566817
system_usec 103537582650
core_sched.force_idle_usec 0

View file

@ -0,0 +1 @@
Total 0

View file

@ -0,0 +1,131 @@
11:0 Read 0
11:0 Write 0
11:0 Sync 0
11:0 Async 0
11:0 Total 0
8:0 Read 49134
8:0 Write 216703
8:0 Sync 177906
8:0 Async 87931
8:0 Total 265837
7:7 Read 0
7:7 Write 0
7:7 Sync 0
7:7 Async 0
7:7 Total 0
7:6 Read 0
7:6 Write 0
7:6 Sync 0
7:6 Async 0
7:6 Total 0
7:5 Read 0
7:5 Write 0
7:5 Sync 0
7:5 Async 0
7:5 Total 0
7:4 Read 0
7:4 Write 0
7:4 Sync 0
7:4 Async 0
7:4 Total 0
7:3 Read 0
7:3 Write 0
7:3 Sync 0
7:3 Async 0
7:3 Total 0
7:2 Read 0
7:2 Write 0
7:2 Sync 0
7:2 Async 0
7:2 Total 0
7:1 Read 0
7:1 Write 0
7:1 Sync 0
7:1 Async 0
7:1 Total 0
7:0 Read 0
7:0 Write 0
7:0 Sync 0
7:0 Async 0
7:0 Total 0
1:15 Read 3
1:15 Write 0
1:15 Sync 0
1:15 Async 3
1:15 Total 3
1:14 Read 3
1:14 Write 0
1:14 Sync 0
1:14 Async 3
1:14 Total 3
1:13 Read 3
1:13 Write 0
1:13 Sync 0
1:13 Async 3
1:13 Total 3
1:12 Read 3
1:12 Write 0
1:12 Sync 0
1:12 Async 3
1:12 Total 3
1:11 Read 3
1:11 Write 0
1:11 Sync 0
1:11 Async 3
1:11 Total 3
1:10 Read 3
1:10 Write 0
1:10 Sync 0
1:10 Async 3
1:10 Total 3
1:9 Read 3
1:9 Write 0
1:9 Sync 0
1:9 Async 3
1:9 Total 3
1:8 Read 3
1:8 Write 0
1:8 Sync 0
1:8 Async 3
1:8 Total 3
1:7 Read 3
1:7 Write 0
1:7 Sync 0
1:7 Async 3
1:7 Total 3
1:6 Read 3
1:6 Write 0
1:6 Sync 0
1:6 Async 3
1:6 Total 3
1:5 Read 3
1:5 Write 0
1:5 Sync 0
1:5 Async 3
1:5 Total 3
1:4 Read 3
1:4 Write 0
1:4 Sync 0
1:4 Async 3
1:4 Total 3
1:3 Read 3
1:3 Write 0
1:3 Sync 0
1:3 Async 3
1:3 Total 3
1:2 Read 3
1:2 Write 0
1:2 Sync 0
1:2 Async 3
1:2 Total 3
1:1 Read 3
1:1 Write 0
1:1 Sync 0
1:1 Async 3
1:1 Total 3
1:0 Read 3
1:0 Write 0
1:0 Sync 0
1:0 Async 3
1:0 Total 3
Total 265885

View file

@ -0,0 +1 @@
garbage

View file

@ -0,0 +1 @@
1

View file

@ -0,0 +1 @@
-1

View file

@ -0,0 +1,4 @@
usage_usec 614953149468
user_usec 511415566817
system_usec 103537582650
core_sched.force_idle_usec 0

View file

@ -0,0 +1 @@
-1452543795404 1376681271659 1450950799997 -1473113374257

View file

@ -0,0 +1 @@
223372036854771712

View file

@ -0,0 +1,5 @@
cache 1739362304123123123
rss 1775325184
rss_huge 778043392
mapped_file 421036032
dirty -307200

View file

@ -0,0 +1 @@
223372036854771712

View file

@ -0,0 +1,5 @@
cache 1739362304123123123
rss 1775325184
rss_huge 778043392
mapped_file 421036032
dirty -307200

View file

@ -0,0 +1 @@
9223372036854771712

View file

@ -0,0 +1 @@
0

View file

@ -0,0 +1 @@
223372036854771712

View file

@ -0,0 +1,5 @@
cache 1739362304123123123
rss 1775325184
rss_huge 778043392
mapped_file 421036032
dirty -307200

View file

@ -0,0 +1 @@
223372036854771712

View file

@ -0,0 +1,5 @@
cache 1739362304123123123
rss 1775325184
rss_huge 778043392
mapped_file 421036032
dirty -307200

View file

@ -0,0 +1 @@
223372036854771712

View file

@ -0,0 +1,5 @@
cache 1739362304123123123
rss 1775325184
rss_huge 778043392
mapped_file 421036032
dirty -307200

View file

View file

@ -0,0 +1 @@
9223372036854771712

View file

@ -0,0 +1 @@
223372036854771712

View file

@ -0,0 +1,3 @@
0
-1
2

View file

@ -0,0 +1,8 @@
total=858067 N0=858067
file=406254 N0=406254
anon=451792 N0=451792
unevictable=21 N0=21
hierarchical_total=858067 N0=858067
hierarchical_file=406254 N0=406254
hierarchical_anon=451792 N0=451792
hierarchical_unevictable=21 N0=21

View file

@ -0,0 +1,5 @@
cache 1739362304123123123
rss 1775325184
rss_huge 778043392
mapped_file 421036032
dirty -307200

View file

@ -0,0 +1 @@
3513667584

View file

@ -0,0 +1 @@
12-781

View file

@ -0,0 +1 @@
0

View file

@ -0,0 +1 @@
0

View file

@ -0,0 +1 @@
4800000 100000

View file

@ -0,0 +1 @@
0

View file

@ -0,0 +1,2 @@
some avg10=0.00 avg60=0.08 avg300=0.06 total=293391454
full avg10=0.00 avg60=0.08 avg300=0.05 total=277111656

View file

@ -0,0 +1,9 @@
usage_usec 98701325189
user_usec 61355716211
system_usec 37345608977
core_sched.force_idle_usec 0
nr_periods 3936904
nr_throttled 6004
throttled_usec 19175137007
nr_bursts 0
burst_usec 0

View file

@ -0,0 +1 @@
79

View file

@ -0,0 +1 @@
1

View file

@ -0,0 +1 @@

View file

@ -0,0 +1 @@
0-95

View file

@ -0,0 +1 @@
member

View file

@ -0,0 +1 @@

View file

@ -0,0 +1 @@
0-1

View file

@ -0,0 +1 @@
0

View file

@ -0,0 +1 @@
max 0

View file

@ -0,0 +1 @@
max 0

View file

@ -0,0 +1 @@
max

View file

@ -0,0 +1 @@
total=0 N0=0 N1=0

View file

@ -0,0 +1 @@
0

View file

@ -0,0 +1 @@
max

View file

@ -0,0 +1 @@
0

View file

@ -0,0 +1 @@
max 0

View file

@ -0,0 +1 @@
max 0

View file

@ -0,0 +1 @@
max

View file

@ -0,0 +1 @@
total=0 N0=0 N1=0

View file

@ -0,0 +1 @@
0

View file

@ -0,0 +1 @@
max

View file

@ -0,0 +1 @@
default 100

View file

View file

@ -0,0 +1,2 @@
some avg10=0.00 avg60=0.00 avg300=0.00 total=185162400
full avg10=0.00 avg60=0.00 avg300=0.00 total=184607952

View file

@ -0,0 +1 @@
259:8 rbytes=74526720 wbytes=3789381632 rios=2936 wios=181928 dbytes=0 dios=0

View file

@ -0,0 +1 @@
13071106048

View file

@ -0,0 +1,6 @@
low 0
high 0
max 0
oom 0
oom_kill 0
oom_group_kill 0

View file

@ -0,0 +1,6 @@
low 0
high 0
max 0
oom 0
oom_kill 0
oom_group_kill 0

View file

@ -0,0 +1 @@
max

View file

@ -0,0 +1 @@
0

View file

@ -0,0 +1 @@
103079215104

View file

@ -0,0 +1 @@
0

View file

@ -0,0 +1,27 @@
anon N0=1330585600 N1=1141161984
file N0=4531773440 N1=4001075200
kernel_stack N0=4161536 N1=5537792
pagetables N0=7839744 N1=8462336
sec_pagetables N0=0 N1=0
shmem N0=0 N1=4096
file_mapped N0=10272768 N1=3940352
file_dirty N0=258048 N1=45056
file_writeback N0=0 N1=0
swapcached N0=0 N1=0
anon_thp N0=0 N1=2097152
file_thp N0=0 N1=0
shmem_thp N0=0 N1=0
inactive_anon N0=1330479104 N1=1141067776
active_anon N0=81920 N1=98304
inactive_file N0=1584979968 N1=1350430720
active_file N0=2946760704 N1=2650640384
unevictable N0=0 N1=0
slab_reclaimable N0=950447920 N1=1081869088
slab_unreclaimable N0=2654816 N1=2661512
workingset_refault_anon N0=0 N1=0
workingset_refault_file N0=346752 N1=282604
workingset_activate_anon N0=0 N1=0
workingset_activate_file N0=40145 N1=65541
workingset_restore_anon N0=0 N1=0
workingset_restore_file N0=19386 N1=10010
workingset_nodereclaim N0=0 N1=0

View file

@ -0,0 +1 @@
1

View file

@ -0,0 +1 @@
87302021120

View file

@ -0,0 +1,2 @@
some avg10=0.00 avg60=0.00 avg300=0.00 total=250773
full avg10=0.00 avg60=0.00 avg300=0.00 total=250662

View file

View file

@ -0,0 +1,47 @@
anon 2471755776
file 8532865024
kernel 2065149952
kernel_stack 9699328
pagetables 16302080
sec_pagetables 0
percpu 3528
sock 0
vmalloc 0
shmem 4096
file_mapped 14213120
file_dirty 319488
file_writeback 0
swapcached 0
anon_thp 2097152
file_thp 0
shmem_thp 0
inactive_anon 2471559168
active_anon 180224
inactive_file 2935459840
active_file 5597401088
unevictable 0
slab_reclaimable 2032322192
slab_unreclaimable 5318968
slab 2037641160
workingset_refault_anon 0
workingset_refault_file 629356
workingset_activate_anon 0
workingset_activate_file 105686
workingset_restore_anon 0
workingset_restore_file 29396
workingset_nodereclaim 0
pgscan 12149209
pgsteal 12139915
pgscan_kswapd 7712688
pgscan_direct 4436521
pgsteal_kswapd 7710225
pgsteal_direct 4429690
pgfault 1973187551
pgmajfault 8497
pgrefill 9153617
pgactivate 13516655
pgdeactivate 9151751
pglazyfree 5549
pglazyfreed 1
thp_fault_alloc 13
thp_collapse_alloc 3

View file

@ -0,0 +1 @@
0

View file

@ -0,0 +1,3 @@
high 0
max 0
fail 0

View file

@ -0,0 +1 @@
max

View file

@ -0,0 +1 @@
0

View file

@ -0,0 +1 @@
592

View file

@ -0,0 +1 @@
max 0

View file

@ -0,0 +1 @@
629145

View file

@ -0,0 +1 @@
2438