1
0
Fork 0

Adding upstream version 1.34.4.

Signed-off-by: Daniel Baumann <daniel@debian.org>
This commit is contained in:
Daniel Baumann 2025-05-24 07:26:29 +02:00
parent e393c3af3f
commit 4978089aab
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
4963 changed files with 677545 additions and 0 deletions

View file

@ -0,0 +1,137 @@
# Starlark Aggregator Plugin
This plugin allows you to implement a custom aggregator plugin via a
[Starlark][starlark] script.
The Starlark language is a dialect of Python and will be familiar to those who
have experience with the Python language. However, there are major
[differences](#python-differences). Existing Python code is unlikely to work
unmodified.
> [!NOTE]
> The execution environment is sandboxed, and it is not possible to access the
> local filesystem or perform network operations. This is by design of the
> Starlark language as a configuration language.
The Starlark script used by this plugin needs to be composed of the three
methods defining an aggregator named `add`, `push` and `reset`.
The `add` method is called as soon as a new metric is added to the plugin,
adding the metric to the aggregator. After `period`, the `push` method is called to
output the resulting metrics and finally the aggregation is reset by using the
`reset` method of the Starlark script.
The Starlark functions might use the global function `state` to keep aggregation
information such as added metrics etc.
More details on the syntax and available functions can be found in the
[Starlark specification][spec].
⭐ Telegraf v1.21.0
🏷️ transformation
💻 all
[starlark]: https://github.com/google/starlark-go
[spec]: https://github.com/google/starlark-go/blob/d1966c6b9fcd/doc/spec.md
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used to
modify metrics, tags, and fields or create aliases and configure ordering, etc.
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
## Configuration
```toml @sample.conf
# Aggregate metrics using a Starlark script
[[aggregators.starlark]]
## The Starlark source can be set as a string in this configuration file, or
## by referencing a file containing the script. Only one source or script
## should be set at once.
##
## Source of the Starlark script.
source = '''
state = {}
def add(metric):
state["last"] = metric
def push():
return state.get("last")
def reset():
state.clear()
'''
## File containing a Starlark script.
# script = "/usr/local/bin/myscript.star"
## The constants of the Starlark script.
# [aggregators.starlark.constants]
# max_size = 10
# threshold = 0.75
# default_name = "Julia"
# debug_mode = true
```
## Usage
The Starlark code should contain a function called `add` that takes a metric as
argument. The function will be called with each metric to add, and doesn't
return anything.
```python
def add(metric):
state["last"] = metric
```
The Starlark code should also contain a function called `push` that doesn't take
any argument. The function will be called to compute the aggregation, and
returns the metrics to push to the accumulator.
```python
def push():
return state.get("last")
```
The Starlark code should also contain a function called `reset` that doesn't
take any argument. The function will be called to reset the plugin, and doesn't
return anything.
```python
def reset():
state.clear()
```
For a list of available types and functions that can be used in the code, see
the [Starlark specification][spec].
## Python Differences
Refer to the section [Python
Differences](../../processors/starlark/README.md#python-differences) of the
documentation about the Starlark processor.
## Libraries available
Refer to the section [Libraries
available](../../processors/starlark/README.md#libraries-available) of the
documentation about the Starlark processor.
## Common Questions
Refer to the section [Common
Questions](../../processors/starlark/README.md#common-questions) of the
documentation about the Starlark processor.
## Examples
- [minmax](testdata/min_max.star)
- [merge](testdata/merge.star)
[All examples](testdata) are in the testdata folder.
Open a Pull Request to add any other useful Starlark examples.

View file

@ -0,0 +1,29 @@
# Aggregate metrics using a Starlark script
[[aggregators.starlark]]
## The Starlark source can be set as a string in this configuration file, or
## by referencing a file containing the script. Only one source or script
## should be set at once.
##
## Source of the Starlark script.
source = '''
state = {}
def add(metric):
state["last"] = metric
def push():
return state.get("last")
def reset():
state.clear()
'''
## File containing a Starlark script.
# script = "/usr/local/bin/myscript.star"
## The constants of the Starlark script.
# [aggregators.starlark.constants]
# max_size = 10
# threshold = 0.75
# default_name = "Julia"
# debug_mode = true

View file

@ -0,0 +1,114 @@
//go:generate ../../../tools/readme_config_includer/generator
package starlark
import (
_ "embed"
"go.starlark.net/starlark"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/aggregators"
common "github.com/influxdata/telegraf/plugins/common/starlark"
)
//go:embed sample.conf
var sampleConfig string
// Starlark is an aggregator plugin whose add/push/reset behavior is supplied
// by a user-provided Starlark script. All script loading, compilation and
// invocation machinery comes from the embedded common.Common helper.
type Starlark struct {
	common.Common
}
// SampleConfig returns the example configuration embedded from sample.conf.
func (*Starlark) SampleConfig() string {
	return sampleConfig
}
// Init compiles the configured Starlark source and verifies that the script
// defines the three aggregator entry points: add(metric), push() and reset().
// An error is returned if compilation fails or any function is missing.
func (s *Starlark) Init() error {
	// Compile and execute the script source first.
	if err := s.Common.Init(); err != nil {
		return err
	}

	// The script must define an add function taking a single metric.
	if err := s.AddFunction("add", &common.Metric{}); err != nil {
		return err
	}

	// The script must define a parameterless push function.
	if err := s.AddFunction("push"); err != nil {
		return err
	}

	// The script must define a parameterless reset function.
	return s.AddFunction("reset")
}
// Add hands one metric to the script by wrapping it into the pre-registered
// parameter of the add function and invoking add. Call errors are logged.
func (s *Starlark) Add(metric telegraf.Metric) {
	params, found := s.GetParameters("add")
	if !found {
		s.Log.Errorf("The parameters of the add function could not be found")
		return
	}
	params[0].(*common.Metric).Wrap(metric)

	if _, err := s.Call("add"); err != nil {
		s.LogError(err)
	}
}
// Push invokes the script's push function and forwards its result to the
// accumulator. The script may return a single metric, a list of metrics, or
// None (nothing to emit); any other type is logged as an error.
func (s *Starlark) Push(acc telegraf.Accumulator) {
	rv, err := s.Call("push")
	if err != nil {
		s.LogError(err)
		acc.AddError(err)
		return
	}

	switch res := rv.(type) {
	case *starlark.List:
		iter := res.Iterate()
		defer iter.Done()
		var item starlark.Value
		for iter.Next(&item) {
			if wrapped, ok := item.(*common.Metric); ok {
				acc.AddMetric(wrapped.Unwrap())
			} else {
				s.Log.Errorf("Invalid type returned in list: %s", item.Type())
			}
		}
	case *common.Metric:
		acc.AddMetric(res.Unwrap())
	case starlark.NoneType:
		// Nothing to emit this period.
	default:
		s.Log.Errorf("Invalid type returned: %T", rv)
	}
}
// Reset invokes the script's reset function to clear its aggregation state
// at the end of a period. Call errors are logged.
func (s *Starlark) Reset() {
	if _, err := s.Call("reset"); err != nil {
		s.LogError(err)
	}
}
// init initializes starlark aggregator plugin
func init() {
aggregators.Add("starlark", func() telegraf.Aggregator {
return &Starlark{
Common: common.Common{
StarlarkLoadFunc: common.LoadFunc,
},
}
})
}

View file

@ -0,0 +1,434 @@
package starlark
import (
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
common "github.com/influxdata/telegraf/plugins/common/starlark"
"github.com/influxdata/telegraf/testutil"
)
// m1 and m2 are the fixture metrics shared by the min/max tests. They carry
// the same name and tags so the min_max.star script aggregates them into a
// single series; m2 additionally includes non-numeric fields ("ignoreme",
// "andme") that the script must skip.
var m1 = metric.New("m1",
	map[string]string{"foo": "bar"},
	map[string]interface{}{
		"a": int64(1),
		"b": int64(1),
		"c": int64(1),
		"d": int64(1),
		"e": int64(1),
		"f": int64(2),
		"g": int64(2),
		"h": int64(2),
		"i": int64(2),
		"j": int64(3),
	},
	time.Now(),
)

var m2 = metric.New("m1",
	map[string]string{"foo": "bar"},
	map[string]interface{}{
		"a": int64(1),
		"b": int64(3),
		"c": int64(3),
		"d": int64(3),
		"e": int64(3),
		"f": int64(1),
		"g": int64(1),
		"h": int64(1),
		"i": int64(1),
		"j": int64(1),
		"k": int64(200),
		"l": int64(200),
		// Non-numeric fields: the min_max script must ignore these.
		"ignoreme": "string",
		"andme":    true,
	},
	time.Now(),
)
// BenchmarkApply measures the per-metric cost of running the min_max.star
// add function through the Starlark interpreter.
func BenchmarkApply(b *testing.B) {
	plugin, err := newMinMax()
	require.NoError(b, err)

	for i := 0; i < b.N; i++ {
		plugin.Add(m1)
		plugin.Add(m2)
	}
}
// Test two metrics getting added.
func TestMinMaxWithPeriod(t *testing.T) {
	acc := testutil.Accumulator{}
	minmax, err := newMinMax()
	require.NoError(t, err)

	// Both metrics land in the same period, so every field should aggregate
	// to the overall min/max across m1 and m2.
	minmax.Add(m1)
	minmax.Add(m2)
	minmax.Push(&acc)

	expectedFields := map[string]interface{}{
		"a_max": int64(1),
		"a_min": int64(1),
		"b_max": int64(3),
		"b_min": int64(1),
		"c_max": int64(3),
		"c_min": int64(1),
		"d_max": int64(3),
		"d_min": int64(1),
		"e_max": int64(3),
		"e_min": int64(1),
		"f_max": int64(2),
		"f_min": int64(1),
		"g_max": int64(2),
		"g_min": int64(1),
		"h_max": int64(2),
		"h_min": int64(1),
		"i_max": int64(2),
		"i_min": int64(1),
		"j_max": int64(3),
		"j_min": int64(1),
		"k_max": int64(200),
		"k_min": int64(200),
		"l_max": int64(200),
		"l_min": int64(200),
	}
	expectedTags := map[string]string{
		"foo": "bar",
	}
	// m2's non-numeric fields must not appear in the output.
	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test two metrics getting added with a push/reset in between (simulates
// getting added in different periods.)
func TestMinMaxDifferentPeriods(t *testing.T) {
	acc := testutil.Accumulator{}
	minmax, err := newMinMax()
	require.NoError(t, err)

	// First period: only m1, so min == max for every field.
	minmax.Add(m1)
	minmax.Push(&acc)
	expectedFields := map[string]interface{}{
		"a_max": int64(1),
		"a_min": int64(1),
		"b_max": int64(1),
		"b_min": int64(1),
		"c_max": int64(1),
		"c_min": int64(1),
		"d_max": int64(1),
		"d_min": int64(1),
		"e_max": int64(1),
		"e_min": int64(1),
		"f_max": int64(2),
		"f_min": int64(2),
		"g_max": int64(2),
		"g_min": int64(2),
		"h_max": int64(2),
		"h_min": int64(2),
		"i_max": int64(2),
		"i_min": int64(2),
		"j_max": int64(3),
		"j_min": int64(3),
	}
	expectedTags := map[string]string{
		"foo": "bar",
	}
	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)

	// Reset simulates a period boundary: the second period must only see m2.
	acc.ClearMetrics()
	minmax.Reset()
	minmax.Add(m2)
	minmax.Push(&acc)
	expectedFields = map[string]interface{}{
		"a_max": int64(1),
		"a_min": int64(1),
		"b_max": int64(3),
		"b_min": int64(3),
		"c_max": int64(3),
		"c_min": int64(3),
		"d_max": int64(3),
		"d_min": int64(3),
		"e_max": int64(3),
		"e_min": int64(3),
		"f_max": int64(1),
		"f_min": int64(1),
		"g_max": int64(1),
		"g_min": int64(1),
		"h_max": int64(1),
		"h_min": int64(1),
		"i_max": int64(1),
		"i_min": int64(1),
		"j_max": int64(1),
		"j_min": int64(1),
		"k_max": int64(200),
		"k_min": int64(200),
		"l_max": int64(200),
		"l_min": int64(200),
	}
	expectedTags = map[string]string{
		"foo": "bar",
	}
	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// newMinMax builds an initialized aggregator running the min_max example script.
func newMinMax() (*Starlark, error) {
	const script = "testdata/min_max.star"
	return newStarlarkFromScript(script)
}
// TestSimple verifies that two metrics sharing name, tags and timestamp are
// merged into a single metric by the merge.star script.
//
// Fix: the original re-asserted require.NoError(t, err) after each
// plugin.Add(...) call, but Add returns nothing — those checks only re-tested
// the stale error from newMerge and have been removed.
func TestSimple(t *testing.T) {
	plugin, err := newMerge()
	require.NoError(t, err)

	plugin.Add(
		testutil.MustMetric(
			"cpu",
			map[string]string{
				"cpu": "cpu0",
			},
			map[string]interface{}{
				"time_idle": 42,
			},
			time.Unix(0, 0),
		),
	)
	plugin.Add(
		testutil.MustMetric(
			"cpu",
			map[string]string{
				"cpu": "cpu0",
			},
			map[string]interface{}{
				"time_guest": 42,
			},
			time.Unix(0, 0),
		),
	)

	var acc testutil.Accumulator
	plugin.Push(&acc)

	expected := []telegraf.Metric{
		testutil.MustMetric(
			"cpu",
			map[string]string{
				"cpu": "cpu0",
			},
			map[string]interface{}{
				"time_idle":  42,
				"time_guest": 42,
			},
			time.Unix(0, 0),
		),
	}
	testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics())
}
// TestNanosecondPrecision verifies that merged metrics keep their nanosecond
// timestamp even when the accumulator precision is coarser (one second).
//
// Fix: removed the redundant require.NoError(t, err) calls that followed each
// plugin.Add(...) — Add returns no error, so they only re-checked a stale err.
func TestNanosecondPrecision(t *testing.T) {
	plugin, err := newMerge()
	require.NoError(t, err)

	plugin.Add(
		testutil.MustMetric(
			"cpu",
			map[string]string{
				"cpu": "cpu0",
			},
			map[string]interface{}{
				"time_idle": 42,
			},
			time.Unix(0, 1),
		),
	)
	plugin.Add(
		testutil.MustMetric(
			"cpu",
			map[string]string{
				"cpu": "cpu0",
			},
			map[string]interface{}{
				"time_guest": 42,
			},
			time.Unix(0, 1),
		),
	)

	var acc testutil.Accumulator
	acc.SetPrecision(time.Second)
	plugin.Push(&acc)

	expected := []telegraf.Metric{
		testutil.MustMetric(
			"cpu",
			map[string]string{
				"cpu": "cpu0",
			},
			map[string]interface{}{
				"time_idle":  42,
				"time_guest": 42,
			},
			time.Unix(0, 1),
		),
	}
	testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics())
}
// TestReset verifies that Reset starts a new aggregation period: metrics
// added before and after the reset are emitted separately, not merged.
//
// Fix: removed the redundant require.NoError(t, err) calls that followed each
// plugin.Add(...) — Add returns no error, so they only re-checked a stale err.
func TestReset(t *testing.T) {
	plugin, err := newMerge()
	require.NoError(t, err)

	plugin.Add(
		testutil.MustMetric(
			"cpu",
			map[string]string{
				"cpu": "cpu0",
			},
			map[string]interface{}{
				"time_idle": 42,
			},
			time.Unix(0, 0),
		),
	)

	var acc testutil.Accumulator
	plugin.Push(&acc)
	plugin.Reset()

	plugin.Add(
		testutil.MustMetric(
			"cpu",
			map[string]string{
				"cpu": "cpu0",
			},
			map[string]interface{}{
				"time_guest": 42,
			},
			time.Unix(0, 0),
		),
	)
	plugin.Push(&acc)

	expected := []telegraf.Metric{
		testutil.MustMetric(
			"cpu",
			map[string]string{
				"cpu": "cpu0",
			},
			map[string]interface{}{
				"time_idle": 42,
			},
			time.Unix(0, 0),
		),
		testutil.MustMetric(
			"cpu",
			map[string]string{
				"cpu": "cpu0",
			},
			map[string]interface{}{
				"time_guest": 42,
			},
			time.Unix(0, 0),
		),
	}
	testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics())
}
// newMerge builds an initialized aggregator running the merge example script.
func newMerge() (*Starlark, error) {
	const script = "testdata/merge.star"
	return newStarlarkFromScript(script)
}
// TestLastFromSource runs an inline script that keeps only the most recently
// added metric, and checks that push emits exactly that metric.
//
// Fix: removed the redundant require.NoError(t, err) calls that followed each
// plugin.Add(...) — Add returns no error, so they only re-checked a stale err.
func TestLastFromSource(t *testing.T) {
	acc := testutil.Accumulator{}
	plugin, err := newStarlarkFromSource(`
state = {}
def add(metric):
  state["last"] = metric

def push():
  return state.get("last")

def reset():
  state.clear()
`)
	require.NoError(t, err)

	plugin.Add(
		testutil.MustMetric(
			"cpu",
			map[string]string{
				"cpu": "cpu0",
			},
			map[string]interface{}{
				"time_idle": 42,
			},
			time.Unix(0, 0),
		),
	)
	plugin.Add(
		testutil.MustMetric(
			"cpu",
			map[string]string{
				"cpu": "cpu2",
			},
			map[string]interface{}{
				"time_idle": 31,
			},
			time.Unix(0, 0),
		),
	)
	plugin.Push(&acc)

	// Only the last metric added (cpu2) must survive.
	expectedFields := map[string]interface{}{
		"time_idle": int64(31),
	}
	expectedTags := map[string]string{
		"cpu": "cpu2",
	}
	acc.AssertContainsTaggedFields(t, "cpu", expectedFields, expectedTags)
	plugin.Reset()
}
// newStarlarkFromSource builds and initializes a Starlark aggregator whose
// script is provided inline as a string.
func newStarlarkFromSource(source string) (*Starlark, error) {
	plugin := &Starlark{
		Common: common.Common{
			StarlarkLoadFunc: common.LoadFunc,
			Log:              testutil.Logger{},
			Source:           source,
		},
	}
	if err := plugin.Init(); err != nil {
		return nil, err
	}
	return plugin, nil
}
// newStarlarkFromScript builds and initializes a Starlark aggregator whose
// script is loaded from a file path.
func newStarlarkFromScript(script string) (*Starlark, error) {
	plugin := &Starlark{
		Common: common.Common{
			StarlarkLoadFunc: common.LoadFunc,
			Log:              testutil.Logger{},
			Script:           script,
		},
	}
	if err := plugin.Init(); err != nil {
		return nil, err
	}
	return plugin, nil
}

View file

@ -0,0 +1,31 @@
# Example of a merge aggregator implemented with a starlark script.
load('time.star', 'time')

state = {}

def add(metric):
    # Lazily create the per-period containers on the first metric.
    cache = state.get("metrics")
    if cache == None:
        cache = {}
        state["metrics"] = cache
        state["ordered"] = []

    key = groupID(metric)
    merged = cache.get(key)
    if merged == None:
        # First metric of this series: keep a private copy so later field
        # writes cannot mutate the caller's metric.
        merged = deepcopy(metric)
        cache[key] = merged
        state["ordered"].append(merged)
    else:
        # Same series seen again: overlay its fields onto the stored copy.
        for name, value in metric.fields.items():
            merged.fields[name] = value

def push():
    # Emit the merged metrics in first-seen order.
    return state.get("ordered")

def reset():
    state.clear()

def groupID(metric):
    # Metrics merge only when name, tags and timestamp all match.
    key = metric.name + "-"
    for k, v in metric.tags.items():
        key = key + k + "-" + v + "-"
    key = key + "-" + str(metric.time)
    return hash(key)

View file

@ -0,0 +1,53 @@
# Example of a min_max aggregator implemented with a starlark script.
supported_types = (["int", "float"])

state = {}

def add(metric):
    key = groupID(metric)
    aggregate = state.get(key)
    if aggregate == None:
        # First sample of this series: every numeric field starts out as
        # both the current minimum and maximum.
        fields = {}
        for name, value in metric.fields.items():
            if type(value) in supported_types:
                fields[name] = {
                    "min": value,
                    "max": value,
                }
        state[key] = {
            "name": metric.name,
            "tags": metric.tags,
            "fields": fields,
        }
    else:
        for name, value in metric.fields.items():
            # Non-numeric fields are skipped entirely.
            if type(value) not in supported_types:
                continue
            min_max = aggregate["fields"].get(name)
            if min_max == None:
                aggregate["fields"][name] = {
                    "min": value,
                    "max": value,
                }
            elif value < min_max["min"]:
                aggregate["fields"][name]["min"] = value
            elif value > min_max["max"]:
                aggregate["fields"][name]["max"] = value

def push():
    # Build one output metric per series with <field>_min/<field>_max fields.
    metrics = []
    for key in state:
        fields = {}
        for name in state[key]["fields"]:
            fields[name + "_min"] = state[key]["fields"][name]["min"]
            fields[name + "_max"] = state[key]["fields"][name]["max"]
        metrics.append(Metric(state[key]["name"], state[key]["tags"], fields))
    return metrics

def reset():
    state.clear()

def groupID(metric):
    # Series are keyed by name plus tags (timestamp intentionally excluded).
    key = metric.name + "-"
    for k, v in metric.tags.items():
        key = key + k + "-" + v
    return hash(key)