
Adding upstream version 1.34.4.

Signed-off-by: Daniel Baumann <daniel@debian.org>
Daniel Baumann 2025-05-24 07:26:29 +02:00
parent e393c3af3f
commit 4978089aab
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
4963 changed files with 677545 additions and 0 deletions


@@ -0,0 +1,47 @@
# Example README
This description explains at a high level what the serializer does and
provides links to where additional information about the format can be found.
## Configuration
This section contains the sample configuration for the serializer. Since a
serializer does not have a standalone plugin, use the `file` or `http`
outputs as the base config.
```toml
[[outputs.file]]
files = ["stdout"]
## Describe variables using the standard SampleConfig style.
## https://github.com/influxdata/telegraf/wiki/SampleConfig
example_option = "example_value"
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "example"
```
### example_option
If an option requires a more expansive explanation than can be included inline
in the sample configuration, it may be described here.
## Metrics
The optional Metrics section contains details about how the serializer converts
Telegraf metrics into output.
## Example
The optional Example section can show an example conversion to the output
format using InfluxDB Line Protocol as the reference format.
For line-delimited text formats, a diff may be appropriate:
```diff
- cpu,host=localhost,source=example.org value=42
+ cpu|host=localhost|source=example.org|value=42
```


@@ -0,0 +1 @@
package all


@@ -0,0 +1,7 @@
//go:build !custom || serializers || serializers.binary
package all
import (
_ "github.com/influxdata/telegraf/plugins/serializers/binary" // register plugin
)


@@ -0,0 +1,7 @@
//go:build !custom || serializers || serializers.carbon2
package all
import (
_ "github.com/influxdata/telegraf/plugins/serializers/carbon2" // register plugin
)


@@ -0,0 +1,7 @@
//go:build !custom || serializers || serializers.cloudevents
package all
import (
_ "github.com/influxdata/telegraf/plugins/serializers/cloudevents" // register plugin
)


@@ -0,0 +1,7 @@
//go:build !custom || serializers || serializers.csv
package all
import (
_ "github.com/influxdata/telegraf/plugins/serializers/csv" // register plugin
)


@@ -0,0 +1,7 @@
//go:build !custom || serializers || serializers.graphite
package all
import (
_ "github.com/influxdata/telegraf/plugins/serializers/graphite" // register plugin
)


@@ -0,0 +1,7 @@
//go:build !custom || serializers || serializers.influx
package all
import (
_ "github.com/influxdata/telegraf/plugins/serializers/influx" // register plugin
)


@@ -0,0 +1,7 @@
//go:build !custom || serializers || serializers.json
package all
import (
_ "github.com/influxdata/telegraf/plugins/serializers/json" // register plugin
)


@@ -0,0 +1,7 @@
//go:build !custom || serializers || serializers.msgpack
package all
import (
_ "github.com/influxdata/telegraf/plugins/serializers/msgpack" // register plugin
)


@@ -0,0 +1,7 @@
//go:build !custom || serializers || serializers.nowmetric
package all
import (
_ "github.com/influxdata/telegraf/plugins/serializers/nowmetric" // register plugin
)


@@ -0,0 +1,7 @@
//go:build !custom || serializers || serializers.prometheus
package all
import (
_ "github.com/influxdata/telegraf/plugins/serializers/prometheus" // register plugin
)


@@ -0,0 +1,7 @@
//go:build !custom || serializers || serializers.prometheusremotewrite
package all
import (
_ "github.com/influxdata/telegraf/plugins/serializers/prometheusremotewrite" // register plugin
)


@@ -0,0 +1,7 @@
//go:build !custom || serializers || serializers.splunkmetric
package all
import (
_ "github.com/influxdata/telegraf/plugins/serializers/splunkmetric" // register plugin
)


@@ -0,0 +1,7 @@
//go:build !custom || serializers || serializers.template
package all
import (
_ "github.com/influxdata/telegraf/plugins/serializers/template" // register plugin
)


@@ -0,0 +1,7 @@
//go:build !custom || serializers || serializers.wavefront
package all
import (
_ "github.com/influxdata/telegraf/plugins/serializers/wavefront" // register plugin
)


@@ -0,0 +1,134 @@
# Binary Serializer Plugin
The `binary` data format serializer serializes metrics into binary protocols using
user-specified configurations.
## Configuration
```toml
[[outputs.socket_writer]]
address = "tcp://127.0.0.1:54000"
metric_batch_size = 1
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "binary"
## Specify the endianness of the data.
## Available values are "little" (little-endian), "big" (big-endian) and "host",
## where "host" means the same endianness as the machine running Telegraf.
# endianness = "host"
## Definition of the message format and the serialized data.
## Please note that you need to define all elements of the data in the
## correct order.
## An entry can have the following properties:
## read_from -- Source of the data.
## Can be "field", "tag", "time" or "name".
## If omitted "field" is assumed.
## name -- Name of the element (e.g. field or tag).
## Can be omitted for "time" and "name".
## data_format -- Target data-type of the entry. Can be "int8/16/32/64", "uint8/16/32/64",
## "float32/64", "string".
## If the original field type differs from the target type, the value
## will be converted. If precision may be lost, a warning is logged.
## time_format -- Format of the time when "read_from" is "time". Can be any
## of "unix" (default), "unix_ms", "unix_us", "unix_ns".
## string_length -- Length of the string in bytes. Only used for "string" type.
## string_terminator -- Terminator for strings. Only used for "string" type.
## Valid values are "null", "0x00", "00", "0x01", etc.
## If the original string is longer than "string_length", it will be
## truncated so that `string + terminator` together have length "string_length".
## If the original string is shorter than "string_length", it will be padded
## with the terminator up to "string_length" (e.g. "abcd\0\0\0\0\0").
## Defaults to "null" for strings.
entries = [
{ read_from = "field", name = "addr_3", data_format="int16" },
{ read_from = "field", name = "addr_2", data_format="int16" },
{ read_from = "field", name = "addr_4_5", data_format="int32" },
{ read_from = "field", name = "addr_6_7", data_format="float32" },
{ read_from = "field", name = "addr_16_20", data_format="string", string_terminator = "null", string_length = 11 },
{ read_from = "field", name = "addr_3_sc", data_format="float64" }
]
```
### General options and remarks
#### Value conversion
The plugin will try to convert the value of the field to the target data type. If the conversion is not possible without precision loss, the value is converted and a warning is logged.
Conversions are allowed between all supported data types.
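For illustration, the following standalone Go sketch (not the plugin's internal code) mirrors the documented behavior: a float64 field serialized as `int16` loses its fractional part, and strings are truncated or padded so that the terminator always occupies the last of `string_length` bytes.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// fixString truncates or pads s to length bytes, always ending in the
// terminator, mirroring the string_length/string_terminator rules above.
func fixString(s string, length int, terminator byte) []byte {
	buf := []byte(s)
	if len(buf) >= length {
		return append(buf[:length-1], terminator)
	}
	for len(buf) < length {
		buf = append(buf, terminator)
	}
	return buf
}

func main() {
	// Lossy numeric conversion: 1700.1 as int16 drops the fraction;
	// the serializer performs such conversions and logs a warning.
	value := 1700.1
	buf := make([]byte, 2)
	binary.LittleEndian.PutUint16(buf, uint16(int16(value)))
	fmt.Printf("%x\n", buf) // a406 (1700 in little-endian)

	fmt.Printf("%q\n", fixString("abcd", 9, 0x00))        // "abcd\x00\x00\x00\x00\x00"
	fmt.Printf("%q\n", fixString("long-metric", 6, 0x00)) // "long-\x00"
}
```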
### Examples
In the following example, we read some registers from a Modbus device and serialize them into a binary protocol.
```toml
# Retrieve data from MODBUS slave devices
[[inputs.modbus]]
name = "device"
slave_id = 1
timeout = "1s"
controller = "tcp://127.0.0.1:5020"
configuration_type = "register"
holding_registers = [
{ name = "addr_2", byte_order = "AB", data_type="UINT16", scale=1.0, address = [2] },
{ name = "addr_3", byte_order = "AB", data_type="UINT16", scale=1.0, address = [3] },
{ name = "addr_4_5", byte_order = "ABCD", data_type="UINT32", scale=1.0, address = [4,5] },
{ name = "addr_6_7", byte_order = "ABCD", data_type="FLOAT32-IEEE", scale=1.0, address = [6,7] },
{ name = "addr_16_20", byte_order = "ABCD", data_type="STRING", address = [16,17,18,19,20] },
{ name = "addr_3_sc", byte_order = "AB", data_type="UFIXED", scale=0.1, address = [3] }
]
[[outputs.socket_writer]]
address = "tcp://127.0.0.1:54000"
metric_batch_size = 1
data_format = "binary"
endianness = "little"
entries = [
{ read_from = "field", name = "addr_3", data_format="int16" },
{ read_from = "field", name = "addr_2", data_format="int16" },
{ read_from = "field", name = "addr_4_5", data_format="int32" },
{ read_from = "field", name = "addr_6_7", data_format="float32" },
{ read_from = "field", name = "addr_16_20", data_format="string", string_terminator = "null", string_length = 11 },
{ read_from = "field", name = "addr_3_sc", data_format="float64" },
{ read_from = "time", data_format="int32", time_format="unix" },
{ read_from = "name", data_format="string", string_terminator = "null", string_length = 20 }
]
```
On the receiving side, we expect the following message structure:
```cpp
#pragma pack(push, 1)
struct test_struct
{
short addr_3;
short addr_2;
int addr_4_5;
float addr_6_7;
char addr_16_20[11];
double addr_3_sc;
int time;
char metric_name[20];
};
#pragma pack(pop)
```
Produced message:
```text
69420700296a0900c395d343415f425f435f445f455f006766666666909a407c0082656d6f646275730000000000000000000000000000
```
| addr_3 | addr_2 | addr_4_5 | addr_6_7 | addr_16_20 | addr_3_sc | time | metric_name |
|--------|--------|----------|-------------------|------------------------|--------------------|------------|--------------------------------------------|
| 6942 | 0700 | 296a0900 | c395d343 | 415f425f435f445f455f00 | 6766666666909a40 | 7c008265 | 6d6f646275730000000000000000000000000000 |
| 17001 | 7 | 617001 | 423.1700134277344 | A_B_C_D_E_ | 1700.1000000000001 | 1703018620 | modbus |
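Assuming the little-endian example above, the message can also be decoded on the Go side with `encoding/binary`; this is a minimal sketch (names like `testStruct` are illustrative) whose layout mirrors the packed C struct:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"encoding/hex"
	"fmt"
)

// testStruct mirrors the packed C struct field by field.
type testStruct struct {
	Addr3      int16
	Addr2      int16
	Addr45     int32
	Addr67     float32
	Addr1620   [11]byte
	Addr3Sc    float64
	Time       int32
	MetricName [20]byte
}

func main() {
	raw, err := hex.DecodeString("69420700296a0900c395d343415f425f435f445f455f00" +
		"6766666666909a407c0082656d6f646275730000000000000000000000000000")
	if err != nil {
		panic(err)
	}
	var msg testStruct
	// The serializer example above uses endianness = "little".
	if err := binary.Read(bytes.NewReader(raw), binary.LittleEndian, &msg); err != nil {
		panic(err)
	}
	fmt.Println(msg.Addr3, msg.Addr2, msg.Addr45, msg.Time) // 17001 7 617001 1703018620
}
```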


@@ -0,0 +1,108 @@
package binary
import (
"encoding/binary"
"fmt"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/serializers"
)
type Serializer struct {
Entries []*Entry `toml:"entries"`
Endianness string `toml:"endianness"`
converter binary.ByteOrder
}
func (s *Serializer) Init() error {
switch s.Endianness {
case "big":
s.converter = binary.BigEndian
case "little":
s.converter = binary.LittleEndian
case "", "host":
s.Endianness = "host"
s.converter = internal.HostEndianness
default:
return fmt.Errorf("invalid endianness %q", s.Endianness)
}
for i, entry := range s.Entries {
if err := entry.fillDefaults(); err != nil {
return fmt.Errorf("entry %d check failed: %w", i, err)
}
}
return nil
}
func (s *Serializer) Serialize(metric telegraf.Metric) ([]byte, error) {
serialized := make([]byte, 0)
for _, entry := range s.Entries {
switch entry.ReadFrom {
case "field":
field, found := metric.GetField(entry.Name)
if !found {
return nil, fmt.Errorf("field %s not found", entry.Name)
}
entryBytes, err := entry.serializeValue(field, s.converter)
if err != nil {
return nil, err
}
serialized = append(serialized, entryBytes...)
case "tag":
tag, found := metric.GetTag(entry.Name)
if !found {
return nil, fmt.Errorf("tag %s not found", entry.Name)
}
entryBytes, err := entry.serializeValue(tag, s.converter)
if err != nil {
return nil, err
}
serialized = append(serialized, entryBytes...)
case "time":
entryBytes, err := entry.serializeValue(metric.Time(), s.converter)
if err != nil {
return nil, err
}
serialized = append(serialized, entryBytes...)
case "name":
entryBytes, err := entry.serializeValue(metric.Name(), s.converter)
if err != nil {
return nil, err
}
serialized = append(serialized, entryBytes...)
}
}
return serialized, nil
}
func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) {
serialized := make([]byte, 0)
for _, metric := range metrics {
m, err := s.Serialize(metric)
if err != nil {
return nil, err
}
serialized = append(serialized, m...)
}
return serialized, nil
}
func init() {
serializers.Add("binary",
func() telegraf.Serializer {
return &Serializer{}
},
)
}


@@ -0,0 +1,121 @@
package binary
import (
"encoding/hex"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf/metric"
)
func TestMetricSerialization(t *testing.T) {
m := metric.New(
"modbus",
map[string]string{
"tag_1": "ABC",
"tag_2": "1.63",
},
map[string]interface{}{
"addr_2": 7,
"addr_3": 17001,
"addr_4_5": 617001,
"addr_6_7": 423.1700134277344,
"addr_16_20": "A_B_C_D_E_",
"addr_3_sc": 1700.1000000000001,
},
time.Unix(1703018620, 0),
)
tests := []struct {
name string
entries []*Entry
expected map[string]string
}{
{
name: "complex metric serialization",
entries: []*Entry{
{
ReadFrom: "field",
Name: "addr_3",
DataFormat: "int16",
},
{
ReadFrom: "field",
Name: "addr_2",
DataFormat: "int16",
},
{
ReadFrom: "field",
Name: "addr_4_5",
DataFormat: "int32",
},
{
ReadFrom: "field",
Name: "addr_6_7",
DataFormat: "float32",
},
{
ReadFrom: "field",
Name: "addr_16_20",
DataFormat: "string",
StringTerminator: "null",
StringLength: 11,
},
{
ReadFrom: "field",
Name: "addr_3_sc",
DataFormat: "float64",
},
{
ReadFrom: "time",
DataFormat: "int32",
TimeFormat: "unix",
},
{
ReadFrom: "name",
DataFormat: "string",
StringTerminator: "null",
StringLength: 20,
},
{
ReadFrom: "tag",
Name: "tag_1",
DataFormat: "string",
StringLength: 4,
},
{
ReadFrom: "tag",
Name: "tag_2",
DataFormat: "float32",
},
},
expected: map[string]string{
"little": "69420700296a0900c395d343415f425f435f445f455f006766666666909a407c" +
"0082656d6f64627573000000000000000000000000000041424300d7a3d03f",
"big": "4269000700096a2943d395c3415f425f435f445f455f00409a90666666666765" +
"82007c6d6f646275730000000000000000000000000000414243003fd0a3d7",
},
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
for endianness, expected := range tc.expected {
serializer := &Serializer{
Entries: tc.entries,
Endianness: endianness,
}
require.NoError(t, serializer.Init())
serialized, err := serializer.Serialize(m)
actual := hex.EncodeToString(serialized)
require.NoError(t, err)
require.Equal(t, expected, actual)
}
})
}
}


@@ -0,0 +1,133 @@
package binary
import (
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"strings"
"time"
)
type converterFunc func(value interface{}, order binary.ByteOrder) ([]byte, error)
type Entry struct {
ReadFrom string `toml:"read_from"` // field, tag, time, name
Name string `toml:"name"` // name of entry
DataFormat string `toml:"data_format"` // int8, int16, int32, int64, uint8, uint16, uint32, uint64, float32, float64, string
StringTerminator string `toml:"string_terminator"` // for string metrics: null, 0x00, 00, ....
StringLength uint64 `toml:"string_length"` // for string only, target size
TimeFormat string `toml:"time_format"` // for time metrics: unix, unix_ms, unix_us, unix_ns
converter converterFunc
termination byte
}
func (e *Entry) fillDefaults() error {
// Normalize
e.ReadFrom = strings.ToLower(e.ReadFrom)
// Check input constraints
switch e.ReadFrom {
case "":
e.ReadFrom = "field"
fallthrough
case "field", "tag":
if e.Name == "" {
return errors.New("missing name")
}
case "time":
switch e.TimeFormat {
case "":
e.TimeFormat = "unix"
case "unix", "unix_ms", "unix_us", "unix_ns":
default:
return errors.New("invalid time format")
}
case "name":
if e.DataFormat == "" {
e.DataFormat = "string"
} else if e.DataFormat != "string" {
return errors.New("name data format has to be string")
}
default:
return fmt.Errorf("unknown assignment %q", e.ReadFrom)
}
// Check data format
switch e.DataFormat {
case "":
return errors.New("missing data format")
case "float64":
e.converter = convertToFloat64
case "float32":
e.converter = convertToFloat32
case "uint64":
e.converter = convertToUint64
case "uint32":
e.converter = convertToUint32
case "uint16":
e.converter = convertToUint16
case "uint8":
e.converter = convertToUint8
case "int64":
e.converter = convertToInt64
case "int32":
e.converter = convertToInt32
case "int16":
e.converter = convertToInt16
case "int8":
e.converter = convertToInt8
case "string":
switch e.StringTerminator {
case "", "null":
e.termination = 0x00
default:
e.StringTerminator = strings.TrimPrefix(e.StringTerminator, "0x")
termination, err := hex.DecodeString(e.StringTerminator)
if err != nil {
return fmt.Errorf("decoding terminator failed for %q: %w", e.Name, err)
}
if len(termination) != 1 {
return fmt.Errorf("terminator must be a single byte, got %q", e.StringTerminator)
}
e.termination = termination[0]
}
if e.StringLength < 1 {
return errors.New("string length must be at least 1")
}
e.converter = e.convertToString
default:
return fmt.Errorf("invalid data format %q for field %q", e.ReadFrom, e.DataFormat)
}
return nil
}
func (e *Entry) serializeValue(value interface{}, order binary.ByteOrder) ([]byte, error) {
// Handle normal fields, tags, etc
if e.ReadFrom != "time" {
return e.converter(value, order)
}
// We need to serialize the time, make sure we actually do get a time and
// convert it to the correct timestamp (with scale) first.
t, ok := value.(time.Time)
if !ok {
return nil, fmt.Errorf("time expected but got %T", value)
}
var timestamp int64
switch e.TimeFormat {
case "unix":
timestamp = t.Unix()
case "unix_ms":
timestamp = t.UnixMilli()
case "unix_us":
timestamp = t.UnixMicro()
case "unix_ns":
timestamp = t.UnixNano()
}
return e.converter(timestamp, order)
}


@@ -0,0 +1,305 @@
package binary
import (
"encoding/binary"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf/internal"
)
func TestSerialization(t *testing.T) {
tests := []struct {
name string
entry *Entry
input interface{}
expected map[binary.ByteOrder][]byte
overflow bool
}{
{
name: "positive int serialization",
entry: &Entry{Name: "test", DataFormat: "int32"},
input: 1,
expected: map[binary.ByteOrder][]byte{
binary.BigEndian: {0x00, 0x00, 0x00, 0x01},
binary.LittleEndian: {0x01, 0x00, 0x00, 0x00},
},
},
{
name: "negative int serialization",
entry: &Entry{Name: "test", DataFormat: "int32"},
input: -1,
expected: map[binary.ByteOrder][]byte{
binary.BigEndian: {0xff, 0xff, 0xff, 0xff},
binary.LittleEndian: {0xff, 0xff, 0xff, 0xff},
},
},
{
name: "negative int serialization | uint32 representation",
entry: &Entry{Name: "test", DataFormat: "uint32"},
input: -1,
expected: map[binary.ByteOrder][]byte{
binary.BigEndian: {0xff, 0xff, 0xff, 0xff},
binary.LittleEndian: {0xff, 0xff, 0xff, 0xff},
},
overflow: true,
},
{
name: "uint to int serialization",
entry: &Entry{Name: "test", DataFormat: "uint8"},
input: uint(1),
expected: map[binary.ByteOrder][]byte{
binary.BigEndian: {0x01},
binary.LittleEndian: {0x01},
},
},
{
name: "string serialization",
entry: &Entry{Name: "test", DataFormat: "string", StringLength: 4},
input: "test",
expected: map[binary.ByteOrder][]byte{
binary.BigEndian: {0x74, 0x65, 0x73, 0x00},
binary.LittleEndian: {0x74, 0x65, 0x73, 0x00},
},
},
{
name: "string serialization with terminator",
entry: &Entry{Name: "test", DataFormat: "string", StringLength: 5, StringTerminator: "null"},
input: "test",
expected: map[binary.ByteOrder][]byte{
binary.BigEndian: {0x74, 0x65, 0x73, 0x74, 0x00},
binary.LittleEndian: {0x74, 0x65, 0x73, 0x74, 0x00},
},
},
{
name: "string serialization with hex terminator",
entry: &Entry{Name: "test", DataFormat: "string", StringLength: 5, StringTerminator: "0x01"},
input: "test",
expected: map[binary.ByteOrder][]byte{
binary.BigEndian: {0x74, 0x65, 0x73, 0x74, 0x01},
binary.LittleEndian: {0x74, 0x65, 0x73, 0x74, 0x01},
},
},
{
name: "time serialization",
entry: &Entry{ReadFrom: "time", DataFormat: "uint64"},
input: time.Date(2024, time.January, 6, 19, 44, 10, 0, time.UTC),
expected: map[binary.ByteOrder][]byte{
binary.BigEndian: {0x00, 0x00, 0x00, 0x00, 0x65, 0x99, 0xad, 0x8a},
binary.LittleEndian: {0x8a, 0xad, 0x99, 0x65, 0x00, 0x00, 0x00, 0x00},
},
},
{
name: "float32 serialization",
entry: &Entry{Name: "test", DataFormat: "float32"},
input: float32(3.1415),
expected: map[binary.ByteOrder][]byte{
binary.BigEndian: {0x40, 0x49, 0x0e, 0x56},
binary.LittleEndian: {0x56, 0x0e, 0x49, 0x40},
},
},
{
name: "float32 serialization | float64 representation",
entry: &Entry{Name: "test", DataFormat: "float64"},
input: float32(3.1415),
expected: map[binary.ByteOrder][]byte{
binary.BigEndian: {0x40, 0x09, 0x21, 0xCA, 0xC0, 0x00, 0x00, 0x00},
binary.LittleEndian: {0x00, 0x00, 0x00, 0xC0, 0xCA, 0x21, 0x09, 0x40},
},
},
{
name: "float64 serialization",
entry: &Entry{Name: "test", DataFormat: "float64"},
input: 3.1415,
expected: map[binary.ByteOrder][]byte{
binary.BigEndian: {0x40, 0x09, 0x21, 0xCA, 0xC0, 0x83, 0x12, 0x6F},
binary.LittleEndian: {0x6F, 0x12, 0x83, 0xC0, 0xCA, 0x21, 0x09, 0x40},
},
},
{
name: "float64 serialization | float32 representation",
entry: &Entry{Name: "test", DataFormat: "float32"},
input: 3.1415,
expected: map[binary.ByteOrder][]byte{
binary.BigEndian: {0x40, 0x49, 0x0e, 0x56},
binary.LittleEndian: {0x56, 0x0e, 0x49, 0x40},
},
},
{
name: "float64 serialization | int64 representation",
entry: &Entry{Name: "test", DataFormat: "int64"},
input: 3.1415,
expected: map[binary.ByteOrder][]byte{
binary.BigEndian: {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03},
binary.LittleEndian: {0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
},
},
{
name: "float64 serialization | uint8 representation",
entry: &Entry{Name: "test", DataFormat: "uint8"},
input: 3.1415,
expected: map[binary.ByteOrder][]byte{
binary.BigEndian: {0x03}, binary.LittleEndian: {0x03},
},
},
{
name: "uint serialization | float32 representation",
entry: &Entry{Name: "test", DataFormat: "float32"},
input: uint(1),
expected: map[binary.ByteOrder][]byte{
binary.BigEndian: {0x3f, 0x80, 0x00, 0x00},
binary.LittleEndian: {0x00, 0x00, 0x80, 0x3f},
},
},
{
name: "uint serialization | float64 representation",
entry: &Entry{Name: "test", DataFormat: "float64"},
input: uint(1),
expected: map[binary.ByteOrder][]byte{
binary.BigEndian: {0x3f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
binary.LittleEndian: {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f},
},
},
{
name: "int serialization | float32 representation",
entry: &Entry{Name: "test", DataFormat: "float32"},
input: -101,
expected: map[binary.ByteOrder][]byte{
binary.BigEndian: {0xc2, 0xca, 0x00, 0x00},
binary.LittleEndian: {0x00, 0x00, 0xca, 0xc2},
},
},
{
name: "string serialization | float32 representation",
entry: &Entry{Name: "test", DataFormat: "float32"},
input: "-101.25",
expected: map[binary.ByteOrder][]byte{
binary.BigEndian: {0xc2, 0xca, 0x80, 0x00},
binary.LittleEndian: {0x00, 0x80, 0xca, 0xc2},
},
},
{
name: "string serialization | int32 representation",
entry: &Entry{Name: "test", DataFormat: "int32"},
input: "1",
expected: map[binary.ByteOrder][]byte{
binary.BigEndian: {0x00, 0x00, 0x00, 0x01},
binary.LittleEndian: {0x01, 0x00, 0x00, 0x00},
},
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
require.NoError(t, tc.entry.fillDefaults())
for endianness, expected := range tc.expected {
value, err := tc.entry.serializeValue(tc.input, endianness)
if tc.overflow {
require.ErrorIs(t, err, internal.ErrOutOfRange)
} else {
require.NoError(t, err)
}
require.Equal(t, expected, value)
}
})
}
}
func TestNoNameSerialization(t *testing.T) {
e := &Entry{}
require.ErrorContains(t, e.fillDefaults(), "missing name")
}
func BenchmarkSerialization(b *testing.B) {
entries := []struct {
entry *Entry
input interface{}
}{
{
entry: &Entry{Name: "test", DataFormat: "int32"},
input: 1,
},
{
entry: &Entry{Name: "test", DataFormat: "int32"},
input: -1,
},
{
entry: &Entry{Name: "test", DataFormat: "uint8"},
input: uint(1),
},
{
entry: &Entry{Name: "test", DataFormat: "string", StringLength: 4},
input: "test",
},
{
entry: &Entry{Name: "test", DataFormat: "string", StringLength: 5, StringTerminator: "null"},
input: "test",
},
{
entry: &Entry{Name: "test", DataFormat: "string", StringLength: 5, StringTerminator: "0x01"},
input: "test",
},
{
entry: &Entry{ReadFrom: "time", DataFormat: "uint64"},
input: time.Date(2024, time.January, 6, 19, 44, 10, 0, time.UTC),
},
{
entry: &Entry{Name: "test", DataFormat: "float32"},
input: float32(3.1415),
},
{
entry: &Entry{Name: "test", DataFormat: "float64"},
input: float32(3.1415),
},
{
entry: &Entry{Name: "test", DataFormat: "float64"},
input: 3.1415,
},
{
entry: &Entry{Name: "test", DataFormat: "float32"},
input: 3.1415,
},
{
entry: &Entry{Name: "test", DataFormat: "int64"},
input: 3.1415,
},
{
entry: &Entry{Name: "test", DataFormat: "uint8"},
input: 3.1415,
},
{
entry: &Entry{Name: "test", DataFormat: "float32"},
input: uint(1),
},
{
entry: &Entry{Name: "test", DataFormat: "float64"},
input: uint(1),
},
{
entry: &Entry{Name: "test", DataFormat: "float32"},
input: -101,
},
{
entry: &Entry{Name: "test", DataFormat: "float32"},
input: "-101.25",
},
{
entry: &Entry{Name: "test", DataFormat: "int32"},
input: "1",
},
}
for _, tc := range entries {
require.NoError(b, tc.entry.fillDefaults())
}
for i := 0; i < b.N; i++ {
for _, tc := range entries {
_, err := tc.entry.serializeValue(tc.input, binary.BigEndian)
require.NoError(b, err)
}
}
}


@@ -0,0 +1,110 @@
package binary
import (
"encoding/binary"
"math"
"github.com/influxdata/telegraf/internal"
)
func (e *Entry) convertToString(value interface{}, _ binary.ByteOrder) ([]byte, error) {
v, err := internal.ToString(value)
if err != nil {
return nil, err
}
buf := []byte(v)
// If the string is at least as long as the target length, truncate it to
// target length minus one and append the terminator as the last byte.
if len(buf) >= int(e.StringLength) {
dataLength := int(e.StringLength) - 1
return append(buf[:dataLength], e.termination), nil
}
for i := len(buf); i < int(e.StringLength); i++ {
buf = append(buf, e.termination)
}
return buf, nil
}
func convertToUint64(value interface{}, order binary.ByteOrder) ([]byte, error) {
buf := make([]byte, 8)
v, err := internal.ToUint64(value)
order.PutUint64(buf, v)
return buf, err
}
func convertToUint32(value interface{}, order binary.ByteOrder) ([]byte, error) {
buf := make([]byte, 4)
v, err := internal.ToUint32(value)
order.PutUint32(buf, v)
return buf, err
}
func convertToUint16(value interface{}, order binary.ByteOrder) ([]byte, error) {
buf := make([]byte, 2)
v, err := internal.ToUint16(value)
order.PutUint16(buf, v)
return buf, err
}
func convertToUint8(value interface{}, _ binary.ByteOrder) ([]byte, error) {
v, err := internal.ToUint8(value)
return []byte{v}, err
}
func convertToInt64(value interface{}, order binary.ByteOrder) ([]byte, error) {
buf := make([]byte, 8)
v, err := internal.ToInt64(value)
order.PutUint64(buf, uint64(v))
return buf, err
}
func convertToInt32(value interface{}, order binary.ByteOrder) ([]byte, error) {
buf := make([]byte, 4)
v, err := internal.ToInt32(value)
order.PutUint32(buf, uint32(v))
return buf, err
}
func convertToInt16(value interface{}, order binary.ByteOrder) ([]byte, error) {
buf := make([]byte, 2)
v, err := internal.ToInt16(value)
order.PutUint16(buf, uint16(v))
return buf, err
}
func convertToInt8(value interface{}, _ binary.ByteOrder) ([]byte, error) {
v, err := internal.ToInt8(value)
return []byte{uint8(v)}, err
}
func convertToFloat64(value interface{}, order binary.ByteOrder) ([]byte, error) {
v, err := internal.ToFloat64(value)
if err != nil {
return nil, err
}
buf := make([]byte, 8)
x := math.Float64bits(v)
order.PutUint64(buf, x)
return buf, nil
}
func convertToFloat32(value interface{}, order binary.ByteOrder) ([]byte, error) {
v, err := internal.ToFloat32(value)
if err != nil {
return nil, err
}
buf := make([]byte, 4)
x := math.Float32bits(v)
order.PutUint32(buf, x)
return buf, nil
}


@@ -0,0 +1,98 @@
# Carbon2
The `carbon2` serializer translates the Telegraf metric format to the [Carbon2 format](http://metrics20.org/implementations/).
## Configuration
```toml
[[outputs.file]]
## Files to write to, "stdout" is a specially handled file.
files = ["stdout", "/tmp/metrics.out"]
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "carbon2"
## Optionally configure the metric format, i.e. whether to merge metric name and field name.
## Possible options:
## * "field_separate"
## * "metric_includes_field"
## * "" - defaults to "field_separate"
# carbon2_format = "field_separate"
## Character used for replacing sanitized characters. By default ":" is used.
## The following characters are replaced with the sanitize replace char:
## !@#$%^&*()+`'\"[]{};<>,?/\\|=
# carbon2_sanitize_replace_char = ":"
```
Standard form:
```text
metric=name field=field_1 host=foo 30 1234567890
metric=name field=field_2 host=foo 4 1234567890
metric=name field=field_N host=foo 59 1234567890
```
### Metrics format
The `carbon2` serializer has a configuration option, `carbon2_format`, that controls
how metric names are constructed.
By default `metric` will only include the metric name, and a separate `field` tag
will contain the field name.
This is the behavior of `carbon2_format = "field_separate"`, which is the default
even if unspecified.
Optionally, the field name can be merged into the metric name, separated by an
underscore (`_`).
This is the behavior of `carbon2_format = "metric_includes_field"`, which would
make the above example look like:
```text
metric=name_field_1 host=foo 30 1234567890
metric=name_field_2 host=foo 4 1234567890
metric=name_field_N host=foo 59 1234567890
```
### Metric name sanitization
To sanitize the metric name, specify `carbon2_sanitize_replace_char` to
replace the following characters in the metric name:
```text
!@#$%^&*()+`'\"[]{};<>,?/\\|=
```
By default they will be replaced with `:`.
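For example, with the default replacement character a metric named `cpu=1=tmp$custom` with a field `usage_idle` would be emitted as:

```text
metric=cpu:1:tmp:custom field=usage_idle 91.5 1234567890
```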
## Metrics
The serializer converts the metrics by creating `intrinsic_tags` using the combination of metric name and fields.
So, if one Telegraf metric has 4 fields, the `carbon2` output will be 4 separate metrics.
There will be a `metric` tag that represents the name of the metric and a `field` tag to represent the field.
## Example
If we take the following InfluxDB Line Protocol:
```text
weather,location=us-midwest,season=summer temperature=82,wind=100 1234567890
```
after serializing in Carbon2, the result would be:
```text
metric=weather field=temperature location=us-midwest season=summer 82 1234567890
metric=weather field=wind location=us-midwest season=summer 100 1234567890
```
## Fields and Tags with spaces
When a field key or tag key/value contains spaces, the spaces will be replaced with `_`.
## Tags with empty values
When a tag's value is empty, it will be replaced with `null`.
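For example, a metric named `cpu metric` with the tag `cpu 0 = cpu 0` and the field `usage_idle 1` would be serialized as:

```text
metric=cpu_metric field=usage_idle_1 cpu_0=cpu_0 91.5 1234567890
```

An empty `cpu` tag on the same metric would instead appear as `cpu=null`.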


@@ -0,0 +1,119 @@
package carbon2
import (
"bytes"
"errors"
"fmt"
"strconv"
"strings"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/serializers"
)
const sanitizedChars = "!@#$%^&*()+`'\"[]{};<>,?/\\|="
type Serializer struct {
Format string `toml:"carbon2_format"`
SanitizeReplaceChar string `toml:"carbon2_sanitize_replace_char"`
Log telegraf.Logger `toml:"-"`
sanitizeReplacer *strings.Replacer
template string
}
func (s *Serializer) Init() error {
if s.SanitizeReplaceChar == "" {
s.SanitizeReplaceChar = ":"
}
if len(s.SanitizeReplaceChar) > 1 {
return errors.New("sanitize replace char has to be a singular character")
}
// Create a replacer that replaces all characters requiring sanitization with the user-specified replacement
pairs := make([]string, 0, 2*len(sanitizedChars))
for _, c := range sanitizedChars {
pairs = append(pairs, string(c), s.SanitizeReplaceChar)
}
s.sanitizeReplacer = strings.NewReplacer(pairs...)
switch s.Format {
case "", "field_separate":
s.Format = "field_separate"
s.template = "metric=%s field=%s "
case "metric_includes_field":
s.template = "metric=%s_%s "
default:
return fmt.Errorf("unknown carbon2 format: %s", s.Format)
}
return nil
}
func (s *Serializer) Serialize(metric telegraf.Metric) ([]byte, error) {
return s.createObject(metric), nil
}
func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) {
var batch bytes.Buffer
for _, metric := range metrics {
batch.Write(s.createObject(metric))
}
return batch.Bytes(), nil
}
func (s *Serializer) createObject(metric telegraf.Metric) []byte {
var m bytes.Buffer
for fieldName, fieldValue := range metric.Fields() {
if _, ok := fieldValue.(string); ok {
continue
}
name := s.sanitizeReplacer.Replace(metric.Name())
var value string
if v, ok := fieldValue.(bool); ok {
if v {
value = "1"
} else {
value = "0"
}
} else {
var err error
value, err = internal.ToString(fieldValue)
if err != nil {
s.Log.Warnf("Cannot convert %v (%T) to string", fieldValue, fieldValue)
continue
}
}
m.WriteString(fmt.Sprintf(s.template, strings.ReplaceAll(name, " ", "_"), strings.ReplaceAll(fieldName, " ", "_")))
for _, tag := range metric.TagList() {
m.WriteString(strings.ReplaceAll(tag.Key, " ", "_"))
m.WriteString("=")
value := tag.Value
if len(value) == 0 {
value = "null"
}
m.WriteString(strings.ReplaceAll(value, " ", "_"))
m.WriteString(" ")
}
m.WriteString(" ")
m.WriteString(value)
m.WriteString(" ")
m.WriteString(strconv.FormatInt(metric.Time().Unix(), 10))
m.WriteString("\n")
}
return m.Bytes()
}
func init() {
serializers.Add("carbon2",
func() telegraf.Serializer {
return &Serializer{}
},
)
}


@@ -0,0 +1,442 @@
package carbon2
import (
"fmt"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/plugins/serializers"
)
func TestSerializeMetricFloat(t *testing.T) {
now := time.Now()
tags := map[string]string{
"cpu": "cpu0",
}
fields := map[string]interface{}{
"usage_idle": float64(91.5),
}
m := metric.New("cpu", tags, fields, now)
testcases := []struct {
format string
expected string
}{
{
format: "field_separate",
expected: fmt.Sprintf("metric=cpu field=usage_idle cpu=cpu0 91.5 %d\n", now.Unix()),
},
{
format: "metric_includes_field",
expected: fmt.Sprintf("metric=cpu_usage_idle cpu=cpu0 91.5 %d\n", now.Unix()),
},
}
for _, tc := range testcases {
t.Run(tc.format, func(t *testing.T) {
s := &Serializer{
Format: tc.format,
}
require.NoError(t, s.Init())
buf, err := s.Serialize(m)
require.NoError(t, err)
require.Equal(t, tc.expected, string(buf))
})
}
}
func TestSerializeMetricWithEmptyStringTag(t *testing.T) {
now := time.Now()
tags := map[string]string{
"cpu": "",
}
fields := map[string]interface{}{
"usage_idle": float64(91.5),
}
m := metric.New("cpu", tags, fields, now)
testcases := []struct {
format string
expected string
}{
{
format: "field_separate",
expected: fmt.Sprintf("metric=cpu field=usage_idle cpu=null 91.5 %d\n", now.Unix()),
},
{
format: "metric_includes_field",
expected: fmt.Sprintf("metric=cpu_usage_idle cpu=null 91.5 %d\n", now.Unix()),
},
}
for _, tc := range testcases {
t.Run(tc.format, func(t *testing.T) {
s := &Serializer{
Format: tc.format,
}
require.NoError(t, s.Init())
buf, err := s.Serialize(m)
require.NoError(t, err)
require.Equal(t, tc.expected, string(buf))
})
}
}
func TestSerializeWithSpaces(t *testing.T) {
now := time.Now()
tags := map[string]string{
"cpu 0": "cpu 0",
}
fields := map[string]interface{}{
"usage_idle 1": float64(91.5),
}
m := metric.New("cpu metric", tags, fields, now)
testcases := []struct {
format string
expected string
}{
{
format: "field_separate",
expected: fmt.Sprintf("metric=cpu_metric field=usage_idle_1 cpu_0=cpu_0 91.5 %d\n", now.Unix()),
},
{
format: "metric_includes_field",
expected: fmt.Sprintf("metric=cpu_metric_usage_idle_1 cpu_0=cpu_0 91.5 %d\n", now.Unix()),
},
}
for _, tc := range testcases {
t.Run(tc.format, func(t *testing.T) {
s := &Serializer{
Format: tc.format,
}
require.NoError(t, s.Init())
buf, err := s.Serialize(m)
require.NoError(t, err)
require.Equal(t, tc.expected, string(buf))
})
}
}
func TestSerializeMetricInt(t *testing.T) {
now := time.Now()
tags := map[string]string{
"cpu": "cpu0",
}
fields := map[string]interface{}{
"usage_idle": int64(90),
}
m := metric.New("cpu", tags, fields, now)
testcases := []struct {
format string
expected string
}{
{
format: "field_separate",
expected: fmt.Sprintf("metric=cpu field=usage_idle cpu=cpu0 90 %d\n", now.Unix()),
},
{
format: "metric_includes_field",
expected: fmt.Sprintf("metric=cpu_usage_idle cpu=cpu0 90 %d\n", now.Unix()),
},
}
for _, tc := range testcases {
t.Run(tc.format, func(t *testing.T) {
s := &Serializer{
Format: tc.format,
}
require.NoError(t, s.Init())
buf, err := s.Serialize(m)
require.NoError(t, err)
require.Equal(t, tc.expected, string(buf))
})
}
}
func TestSerializeMetricString(t *testing.T) {
now := time.Now()
tags := map[string]string{
"cpu": "cpu0",
}
fields := map[string]interface{}{
"usage_idle": "foobar",
}
m := metric.New("cpu", tags, fields, now)
testcases := []struct {
format string
expected string
}{
{
format: "field_separate",
expected: "",
},
{
format: "metric_includes_field",
expected: "",
},
}
for _, tc := range testcases {
t.Run(tc.format, func(t *testing.T) {
s := &Serializer{
Format: tc.format,
}
require.NoError(t, s.Init())
buf, err := s.Serialize(m)
require.NoError(t, err)
require.Equal(t, tc.expected, string(buf))
})
}
}
func TestSerializeMetricBool(t *testing.T) {
requireMetric := func(tim time.Time, value bool) telegraf.Metric {
tags := map[string]string{
"tag_name": "tag_value",
}
fields := map[string]interface{}{
"java_lang_GarbageCollector_Valid": value,
}
m := metric.New("cpu", tags, fields, tim)
return m
}
now := time.Now()
testcases := []struct {
metric telegraf.Metric
format string
expected string
}{
{
metric: requireMetric(now, false),
format: "field_separate",
expected: fmt.Sprintf("metric=cpu field=java_lang_GarbageCollector_Valid tag_name=tag_value 0 %d\n", now.Unix()),
},
{
metric: requireMetric(now, false),
format: "metric_includes_field",
expected: fmt.Sprintf("metric=cpu_java_lang_GarbageCollector_Valid tag_name=tag_value 0 %d\n", now.Unix()),
},
{
metric: requireMetric(now, true),
format: "field_separate",
expected: fmt.Sprintf("metric=cpu field=java_lang_GarbageCollector_Valid tag_name=tag_value 1 %d\n", now.Unix()),
},
{
metric: requireMetric(now, true),
format: "metric_includes_field",
expected: fmt.Sprintf("metric=cpu_java_lang_GarbageCollector_Valid tag_name=tag_value 1 %d\n", now.Unix()),
},
}
for _, tc := range testcases {
t.Run(tc.format, func(t *testing.T) {
s := &Serializer{
Format: tc.format,
}
require.NoError(t, s.Init())
buf, err := s.Serialize(tc.metric)
require.NoError(t, err)
require.Equal(t, tc.expected, string(buf))
})
}
}
func TestSerializeBatch(t *testing.T) {
m := metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": 42,
},
time.Unix(0, 0),
)
metrics := []telegraf.Metric{m, m}
testcases := []struct {
format string
expected string
}{
{
format: "field_separate",
expected: `metric=cpu field=value 42 0
metric=cpu field=value 42 0
`,
},
{
format: "metric_includes_field",
expected: `metric=cpu_value 42 0
metric=cpu_value 42 0
`,
},
}
for _, tc := range testcases {
t.Run(tc.format, func(t *testing.T) {
s := &Serializer{
Format: tc.format,
}
require.NoError(t, s.Init())
buf, err := s.SerializeBatch(metrics)
require.NoError(t, err)
require.Equal(t, tc.expected, string(buf))
})
}
}
func TestSerializeMetricIsProperlySanitized(t *testing.T) {
now := time.Now()
testcases := []struct {
metricFunc func() telegraf.Metric
format string
expected string
replaceChar string
expectedErr bool
}{
{
metricFunc: func() telegraf.Metric {
fields := map[string]interface{}{
"usage_idle": float64(91.5),
}
return metric.New("cpu=1", nil, fields, now)
},
format: "field_separate",
expected: fmt.Sprintf("metric=cpu:1 field=usage_idle 91.5 %d\n", now.Unix()),
},
{
metricFunc: func() telegraf.Metric {
fields := map[string]interface{}{
"usage_idle": float64(91.5),
}
return metric.New("cpu=1", nil, fields, now)
},
format: "field_separate",
expected: fmt.Sprintf("metric=cpu_1 field=usage_idle 91.5 %d\n", now.Unix()),
replaceChar: "_",
},
{
metricFunc: func() telegraf.Metric {
fields := map[string]interface{}{
"usage_idle": float64(91.5),
}
return metric.New("cpu=1=tmp$custom", nil, fields, now)
},
format: "field_separate",
expected: fmt.Sprintf("metric=cpu:1:tmp:custom field=usage_idle 91.5 %d\n", now.Unix()),
},
{
metricFunc: func() telegraf.Metric {
fields := map[string]interface{}{
"usage_idle": float64(91.5),
}
return metric.New("cpu=1=tmp$custom%namespace", nil, fields, now)
},
format: "field_separate",
expected: fmt.Sprintf("metric=cpu:1:tmp:custom:namespace field=usage_idle 91.5 %d\n", now.Unix()),
},
{
metricFunc: func() telegraf.Metric {
fields := map[string]interface{}{
"usage_idle": float64(91.5),
}
return metric.New("cpu=1=tmp$custom%namespace", nil, fields, now)
},
format: "metric_includes_field",
expected: fmt.Sprintf("metric=cpu:1:tmp:custom:namespace_usage_idle 91.5 %d\n", now.Unix()),
},
{
metricFunc: func() telegraf.Metric {
fields := map[string]interface{}{
"usage_idle": float64(91.5),
}
return metric.New("cpu=1=tmp$custom%namespace", nil, fields, now)
},
format: "metric_includes_field",
expected: fmt.Sprintf("metric=cpu_1_tmp_custom_namespace_usage_idle 91.5 %d\n", now.Unix()),
replaceChar: "_",
},
{
metricFunc: func() telegraf.Metric {
fields := map[string]interface{}{
"usage_idle": float64(91.5),
}
return metric.New("cpu=1=tmp$custom%namespace", nil, fields, now)
},
format: "metric_includes_field",
expectedErr: true,
replaceChar: "___",
},
}
for _, tc := range testcases {
t.Run(tc.format, func(t *testing.T) {
m := tc.metricFunc()
s := &Serializer{
Format: tc.format,
SanitizeReplaceChar: tc.replaceChar,
}
err := s.Init()
if tc.expectedErr {
require.Error(t, err)
return
}
require.NoError(t, err)
buf, err := s.Serialize(m)
require.NoError(t, err)
require.Equal(t, tc.expected, string(buf))
})
}
}
func BenchmarkSerialize(b *testing.B) {
s := &Serializer{}
require.NoError(b, s.Init())
metrics := serializers.BenchmarkMetrics(b)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := s.Serialize(metrics[i%len(metrics)])
require.NoError(b, err)
}
}
func BenchmarkSerializeBatch(b *testing.B) {
s := &Serializer{}
require.NoError(b, s.Init())
m := serializers.BenchmarkMetrics(b)
metrics := m[:]
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := s.SerializeBatch(metrics)
require.NoError(b, err)
}
}


@@ -0,0 +1,64 @@
# CloudEvents Serializer
The `cloudevents` data format outputs metrics as [CloudEvents][CloudEvents] in
[JSON format][JSON Spec]. Currently, versions v1.0 and v0.3 of the specification
are supported with the former being the default.
[CloudEvents]: https://cloudevents.io
[JSON Spec]: https://github.com/cloudevents/spec/blob/v1.0/json-format.md
## Configuration
```toml
[[outputs.file]]
## Files to write to, "stdout" is a specially handled file
files = ["stdout", "/tmp/metrics.out"]
## Data format to output
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "cloudevents"
## Specification version to use for events
## Currently versions "0.3" and "1.0" are supported.
# cloudevents_version = "1.0"
## Event source specifier
## This allows overwriting the source header-field with the given value.
# cloudevents_source = "telegraf"
## Tag to use as event source specifier
## This allows overwriting the source header-field with the value of the
## specified tag. If both 'cloudevents_source' and 'cloudevents_source_tag'
## are set, this setting will take precedence. In case the specified tag
## value does not exist for a metric, the serializer will fall back to
## 'cloudevents_source'.
# cloudevents_source_tag = ""
## Event-type specifier to overwrite the default value
## By default, events (and event batches) containing a single metric will
## set the event-type to 'com.influxdata.telegraf.metric' while events
## containing a batch of metrics will set the event-type to
## 'com.influxdata.telegraf.metrics' (plural).
# cloudevents_event_type = ""
## Set time header of the event
## Supported values are:
## none -- do not set event time
## earliest -- use timestamp of the earliest metric
## latest -- use timestamp of the latest metric
## creation -- use timestamp of event creation
## For events containing only a single metric, earliest and latest are
## equivalent.
# cloudevents_event_time = "latest"
## Batch format of the output when running in batch mode
## If set to 'events' the resulting output will contain a list of events,
## each with a single metric according to the JSON Batch Format of the
## specification. Use 'application/cloudevents-batch+json' for this format.
##
## When set to 'metrics', a single event will be generated containing a list
## of metrics as payload. Use 'application/cloudevents+json' for this format.
# cloudevents_batch_format = "events"
```
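Schematically, the two batch formats produce the following output shapes (abbreviated sketch; see the serializer's test cases for complete events):

```text
# cloudevents_batch_format = "events": a JSON array with one event per metric
[
  {"specversion": "1.0", "id": "...", "source": "telegraf",
   "type": "com.influxdata.telegraf.metric", "data": { <metric> }},
  ...
]

# cloudevents_batch_format = "metrics": a single event carrying all metrics
{"specversion": "1.0", "id": "...", "source": "telegraf",
 "type": "com.influxdata.telegraf.metrics", "data": [ <metric>, <metric>, ... ]}
```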


@@ -0,0 +1,199 @@
package cloudevents
import (
"encoding/json"
"errors"
"fmt"
"time"
cloudevents "github.com/cloudevents/sdk-go/v2"
"github.com/cloudevents/sdk-go/v2/event"
"github.com/gofrs/uuid/v5"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/serializers"
)
const (
EventTypeSingle = "com.influxdata.telegraf.metric"
EventTypeBatch = "com.influxdata.telegraf.metrics"
)
type Serializer struct {
Version string `toml:"cloudevents_version"`
Source string `toml:"cloudevents_source"`
SourceTag string `toml:"cloudevents_source_tag"`
EventType string `toml:"cloudevents_event_type"`
EventTime string `toml:"cloudevents_event_time"`
BatchFormat string `toml:"cloudevents_batch_format"`
Log telegraf.Logger `toml:"-"`
idgen uuid.Generator
}
func (s *Serializer) Init() error {
switch s.Version {
case "":
s.Version = event.CloudEventsVersionV1
case event.CloudEventsVersionV03, event.CloudEventsVersionV1:
default:
return errors.New("invalid 'cloudevents_version'")
}
switch s.EventTime {
case "":
s.EventTime = "latest"
case "none", "earliest", "latest", "creation":
default:
return errors.New("invalid 'cloudevents_event_time'")
}
switch s.BatchFormat {
case "":
s.BatchFormat = "events"
case "metrics", "events":
default:
return errors.New("invalid 'cloudevents_batch_format'")
}
if s.Source == "" {
s.Source = "telegraf"
}
s.idgen = uuid.NewGen()
return nil
}
func (s *Serializer) Serialize(m telegraf.Metric) ([]byte, error) {
// Create the event that forms the envelope around the metric
evt, err := s.createEvent(m)
if err != nil {
return nil, err
}
return evt.MarshalJSON()
}
func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) {
switch s.BatchFormat {
case "metrics":
return s.batchMetrics(metrics)
case "events":
return s.batchEvents(metrics)
}
return nil, fmt.Errorf("unexpected batch-format %q", s.BatchFormat)
}
func (s *Serializer) batchMetrics(metrics []telegraf.Metric) ([]byte, error) {
// Determine the necessary information
eventType := EventTypeBatch
if s.EventType != "" {
eventType = s.EventType
}
id, err := s.idgen.NewV1()
if err != nil {
return nil, fmt.Errorf("generating ID failed: %w", err)
}
// Serialize the metrics
var earliest, latest time.Time
data := make([]map[string]interface{}, 0, len(metrics))
for _, m := range metrics {
ts := m.Time()
data = append(data, map[string]interface{}{
"name": m.Name(),
"tags": m.Tags(),
"fields": m.Fields(),
"timestamp": ts.UnixNano(),
})
if earliest.IsZero() || ts.Before(earliest) {
earliest = ts
}
if ts.After(latest) {
latest = ts
}
}
// Create the event that forms the envelope around the metric
evt := cloudevents.NewEvent(s.Version)
evt.SetSource(s.Source)
evt.SetID(id.String())
evt.SetType(eventType)
if err := evt.SetData(cloudevents.ApplicationJSON, data); err != nil {
return nil, fmt.Errorf("setting data failed: %w", err)
}
switch s.EventTime {
case "creation":
evt.SetTime(time.Now())
case "earliest":
evt.SetTime(earliest)
case "latest":
evt.SetTime(latest)
}
return json.Marshal(evt)
}
func (s *Serializer) batchEvents(metrics []telegraf.Metric) ([]byte, error) {
events := make([]*cloudevents.Event, 0, len(metrics))
for _, m := range metrics {
e, err := s.createEvent(m)
if err != nil {
s.Log.Errorf("Creating event for %v failed: %v", m, err)
continue
}
events = append(events, e)
}
return json.Marshal(events)
}
func (s *Serializer) createEvent(m telegraf.Metric) (*cloudevents.Event, error) {
// Determine the necessary information
source := s.Source
if s.SourceTag != "" {
if v, ok := m.GetTag(s.SourceTag); ok {
source = v
}
}
eventType := EventTypeSingle
if s.EventType != "" {
eventType = s.EventType
}
id, err := s.idgen.NewV1()
if err != nil {
return nil, fmt.Errorf("generating ID failed: %w", err)
}
// Serialize the metric
data := map[string]interface{}{
"name": m.Name(),
"tags": m.Tags(),
"fields": m.Fields(),
"timestamp": m.Time().UnixNano(),
}
// Create the event that forms the envelope around the metric
evt := cloudevents.NewEvent(s.Version)
evt.SetSource(source)
evt.SetID(id.String())
evt.SetType(eventType)
if err := evt.SetData(cloudevents.ApplicationJSON, data); err != nil {
return nil, fmt.Errorf("setting data failed: %w", err)
}
switch s.EventTime {
case "creation":
evt.SetTime(time.Now())
case "earliest", "latest":
evt.SetTime(m.Time())
}
return &evt, nil
}
func init() {
serializers.Add("cloudevents",
func() telegraf.Serializer {
return &Serializer{}
},
)
}


@@ -0,0 +1,271 @@
package cloudevents
import (
"bytes"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"testing"
"time"
cloudevents "github.com/cloudevents/sdk-go/v2"
"github.com/gofrs/uuid/v5"
"github.com/santhosh-tekuri/jsonschema/v5"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/models"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf/plugins/parsers/influx"
"github.com/influxdata/telegraf/plugins/serializers"
"github.com/influxdata/telegraf/testutil"
)
func TestCases(t *testing.T) {
// Get all directories in testcases
folders, err := os.ReadDir("testcases")
require.NoError(t, err)
// Make sure tests contains data
require.NotEmpty(t, folders)
// Set up for file inputs
outputs.Add("dummy", func() telegraf.Output {
return &OutputDummy{}
})
for _, f := range folders {
// Only handle folders
if !f.IsDir() {
continue
}
fname := f.Name()
t.Run(fname, func(t *testing.T) {
testdataPath := filepath.Join("testcases", fname)
configFilename := filepath.Join(testdataPath, "telegraf.conf")
inputFilename := filepath.Join(testdataPath, "input.influx")
expectedFilename := filepath.Join(testdataPath, "expected.json")
// Get parser to parse input and expected output
parser := &influx.Parser{}
require.NoError(t, parser.Init())
input, err := testutil.ParseMetricsFromFile(inputFilename, parser)
require.NoError(t, err)
var expected []map[string]interface{}
ebuf, err := os.ReadFile(expectedFilename)
require.NoError(t, err)
require.NoError(t, json.Unmarshal(ebuf, &expected))
// Configure the plugin
cfg := config.NewConfig()
require.NoError(t, cfg.LoadConfig(configFilename))
require.Len(t, cfg.Outputs, 1, "wrong number of outputs")
plugin, ok := cfg.Outputs[0].Output.(*OutputDummy)
require.True(t, ok)
serializer, ok := plugin.serializer.(*models.RunningSerializer).Serializer.(*Serializer)
require.True(t, ok)
serializer.idgen = &dummygen{}
// Write input and compare with expected metrics
require.NoError(t, plugin.Write(input))
require.NoError(t, checkEvents(plugin.output))
var joined string
switch len(plugin.output) {
case 0:
require.Emptyf(t, expected, "empty output but %d expected", len(expected))
case 1:
joined = string(plugin.output[0])
if !strings.HasPrefix(joined, "[") {
joined = "[" + joined + "]"
}
default:
joined = "[" + string(bytes.Join(plugin.output, []byte(","))) + "]"
}
var actual []map[string]interface{}
require.NoError(t, json.Unmarshal([]byte(joined), &actual))
require.Len(t, actual, len(expected))
require.ElementsMatch(t, expected, actual)
})
}
}
/* Internal testing functions */
func unmarshalEvents(messages [][]byte) ([]cloudevents.Event, error) {
var events []cloudevents.Event
for i, msg := range messages {
// Check for batch settings
var es []cloudevents.Event
if err := json.Unmarshal(msg, &es); err != nil {
// A single event (a JSON object) fails to unmarshal into a slice with an
// *UnmarshalTypeError; any other error is a real problem and is reported.
var typeErr *json.UnmarshalTypeError
if !errors.As(err, &typeErr) {
return nil, fmt.Errorf("message %d: %w", i, err)
}
var e cloudevents.Event
if err := json.Unmarshal(msg, &e); err != nil {
return nil, fmt.Errorf("message %d: %w", i, err)
}
events = append(events, e)
} else {
events = append(events, es...)
}
}
return events, nil
}
func checkEvents(messages [][]byte) error {
events, err := unmarshalEvents(messages)
if err != nil {
return err
}
for i, e := range events {
if err := e.Validate(); err != nil {
return fmt.Errorf("event %d: %w", i, err)
}
// Do an additional schema validation
var schema *jsonschema.Schema
switch e.SpecVersion() {
case "0.3":
schema = jsonschema.MustCompile("testcases/cloudevents-v0.3-schema.json")
case "1.0":
schema = jsonschema.MustCompile("testcases/cloudevents-v1.0-schema.json")
default:
return fmt.Errorf("unhandled spec version %q in event %d", e.SpecVersion(), i)
}
serializedEvent, err := json.Marshal(e)
if err != nil {
return fmt.Errorf("serializing raw event %d: %w", i, err)
}
var rawEvent interface{}
if err := json.Unmarshal(serializedEvent, &rawEvent); err != nil {
return fmt.Errorf("deserializing raw event %d: %w", i, err)
}
if err := schema.Validate(rawEvent); err != nil {
return fmt.Errorf("validation of event %d: %w", i, err)
}
}
return nil
}
/* Dummy output to allow full config parsing loop */
type OutputDummy struct {
Batch bool `toml:"batch"`
serializer telegraf.Serializer
output [][]byte
}
func (*OutputDummy) SampleConfig() string {
return "dummy"
}
func (o *OutputDummy) Connect() error {
o.output = make([][]byte, 0)
return nil
}
func (*OutputDummy) Close() error {
return nil
}
func (o *OutputDummy) Write(metrics []telegraf.Metric) error {
if o.Batch {
buf, err := o.serializer.SerializeBatch(metrics)
if err != nil {
return err
}
o.output = append(o.output, buf)
} else {
for _, m := range metrics {
buf, err := o.serializer.Serialize(m)
if err != nil {
return err
}
o.output = append(o.output, buf)
}
}
return nil
}
func (o *OutputDummy) SetSerializer(s telegraf.Serializer) {
o.serializer = s
}
/* Dummy UUID generator to get predictable UUIDs for testing */
const testid = "845f6acae52a11ed9976d8bbc1a4a0c6"
type dummygen struct{}
func (*dummygen) NewV1() (uuid.UUID, error) {
id, err := hex.DecodeString(testid)
if err != nil {
return uuid.UUID([16]byte{}), err
}
return uuid.UUID(id), nil
}
func (*dummygen) NewV3(_ uuid.UUID, _ string) uuid.UUID {
return uuid.UUID([16]byte{})
}
func (*dummygen) NewV4() (uuid.UUID, error) {
return uuid.UUID([16]byte{}), errors.New("wrong type")
}
func (*dummygen) NewV5(_ uuid.UUID, _ string) uuid.UUID {
return uuid.UUID([16]byte{})
}
func (*dummygen) NewV6() (uuid.UUID, error) {
return uuid.UUID([16]byte{}), errors.New("wrong type")
}
func (*dummygen) NewV7() (uuid.UUID, error) {
return uuid.UUID([16]byte{}), errors.New("wrong type")
}
func (*dummygen) NewV1AtTime(_ time.Time) (uuid.UUID, error) {
return uuid.UUID([16]byte{}), errors.New("wrong type")
}
func (*dummygen) NewV6AtTime(_ time.Time) (uuid.UUID, error) {
return uuid.UUID([16]byte{}), errors.New("wrong type")
}
func (*dummygen) NewV7AtTime(_ time.Time) (uuid.UUID, error) {
return uuid.UUID([16]byte{}), errors.New("wrong type")
}
func BenchmarkSerialize(b *testing.B) {
s := &Serializer{}
require.NoError(b, s.Init())
metrics := serializers.BenchmarkMetrics(b)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := s.Serialize(metrics[i%len(metrics)])
require.NoError(b, err)
}
}
func BenchmarkSerializeBatch(b *testing.B) {
s := &Serializer{}
require.NoError(b, s.Init())
m := serializers.BenchmarkMetrics(b)
metrics := m[:]
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := s.SerializeBatch(metrics)
require.NoError(b, err)
}
}


@@ -0,0 +1,478 @@
[
{
"specversion": "1.0",
"id": "845f6aca-e52a-11ed-9976-d8bbc1a4a0c6",
"source": "telegraf",
"type": "com.influxdata.telegraf.metric",
"datacontenttype": "application/json",
"time": "2023-04-27T16:30:51Z",
"data": {
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 100,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0,
"usage_user": 0
},
"name": "cpu",
"tags": {
"cpu": "cpu0",
"host": "Hugin"
},
"timestamp": 1682613051000000000
}
},
{
"specversion": "1.0",
"id": "845f6aca-e52a-11ed-9976-d8bbc1a4a0c6",
"source": "telegraf",
"type": "com.influxdata.telegraf.metric",
"datacontenttype": "application/json",
"time": "2023-04-27T16:30:51.000000001Z",
"data": {
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 100,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0,
"usage_user": 0
},
"name": "cpu",
"tags": {
"cpu": "cpu1",
"host": "Hugin"
},
"timestamp": 1682613051000000001
}
},
{
"specversion": "1.0",
"id": "845f6aca-e52a-11ed-9976-d8bbc1a4a0c6",
"source": "telegraf",
"type": "com.influxdata.telegraf.metric",
"datacontenttype": "application/json",
"time": "2023-04-27T16:30:51.000000002Z",
"data": {
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 100,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0,
"usage_user": 0
},
"name": "cpu",
"tags": {
"cpu": "cpu2",
"host": "Hugin"
},
"timestamp": 1682613051000000002
}
},
{
"specversion": "1.0",
"id": "845f6aca-e52a-11ed-9976-d8bbc1a4a0c6",
"source": "telegraf",
"type": "com.influxdata.telegraf.metric",
"datacontenttype": "application/json",
"time": "2023-04-27T16:30:51.000000003Z",
"data": {
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 100,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0,
"usage_user": 0
},
"name": "cpu",
"tags": {
"cpu": "cpu3",
"host": "Hugin"
},
"timestamp": 1682613051000000003
}
},
{
"specversion": "1.0",
"id": "845f6aca-e52a-11ed-9976-d8bbc1a4a0c6",
"source": "telegraf",
"type": "com.influxdata.telegraf.metric",
"datacontenttype": "application/json",
"time": "2023-04-27T16:30:51.000000004Z",
"data": {
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 100,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0,
"usage_user": 0
},
"name": "cpu",
"tags": {
"cpu": "cpu4",
"host": "Hugin"
},
"timestamp": 1682613051000000004
}
},
{
"specversion": "1.0",
"id": "845f6aca-e52a-11ed-9976-d8bbc1a4a0c6",
"source": "telegraf",
"type": "com.influxdata.telegraf.metric",
"datacontenttype": "application/json",
"time": "2023-04-27T16:30:51.000000005Z",
"data": {
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 100,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0,
"usage_user": 0
},
"name": "cpu",
"tags": {
"cpu": "cpu5",
"host": "Hugin"
},
"timestamp": 1682613051000000005
}
},
{
"specversion": "1.0",
"id": "845f6aca-e52a-11ed-9976-d8bbc1a4a0c6",
"source": "telegraf",
"type": "com.influxdata.telegraf.metric",
"datacontenttype": "application/json",
"time": "2023-04-27T16:30:51.000000006Z",
"data": {
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 100,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0,
"usage_user": 0
},
"name": "cpu",
"tags": {
"cpu": "cpu6",
"host": "Hugin"
},
"timestamp": 1682613051000000006
}
},
{
"specversion": "1.0",
"id": "845f6aca-e52a-11ed-9976-d8bbc1a4a0c6",
"source": "telegraf",
"type": "com.influxdata.telegraf.metric",
"datacontenttype": "application/json",
"time": "2023-04-27T16:30:51.000000007Z",
"data": {
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 100,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0,
"usage_user": 0
},
"name": "cpu",
"tags": {
"cpu": "cpu7",
"host": "Hugin"
},
"timestamp": 1682613051000000007
}
},
{
"specversion": "1.0",
"id": "845f6aca-e52a-11ed-9976-d8bbc1a4a0c6",
"source": "telegraf",
"type": "com.influxdata.telegraf.metric",
"datacontenttype": "application/json",
"time": "2023-04-27T16:30:51.000000008Z",
"data": {
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 100,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0,
"usage_user": 0
},
"name": "cpu",
"tags": {
"cpu": "cpu8",
"host": "Hugin"
},
"timestamp": 1682613051000000008
}
},
{
"specversion": "1.0",
"id": "845f6aca-e52a-11ed-9976-d8bbc1a4a0c6",
"source": "telegraf",
"type": "com.influxdata.telegraf.metric",
"datacontenttype": "application/json",
"time": "2023-04-27T16:30:51.000000009Z",
"data": {
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 100,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0,
"usage_user": 0
},
"name": "cpu",
"tags": {
"cpu": "cpu9",
"host": "Hugin"
},
"timestamp": 1682613051000000009
}
},
{
"specversion": "1.0",
"id": "845f6aca-e52a-11ed-9976-d8bbc1a4a0c6",
"source": "telegraf",
"type": "com.influxdata.telegraf.metric",
"datacontenttype": "application/json",
"time": "2023-04-27T16:30:51.00000001Z",
"data": {
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 100,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0,
"usage_user": 0
},
"name": "cpu",
"tags": {
"cpu": "cpu10",
"host": "Hugin"
},
"timestamp": 1682613051000000010
}
},
{
"specversion": "1.0",
"id": "845f6aca-e52a-11ed-9976-d8bbc1a4a0c6",
"source": "telegraf",
"type": "com.influxdata.telegraf.metric",
"datacontenttype": "application/json",
"time": "2023-04-27T16:30:51.000000011Z",
"data": {
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 100,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0,
"usage_user": 0
},
"name": "cpu",
"tags": {
"cpu": "cpu11",
"host": "Hugin"
},
"timestamp": 1682613051000000011
}
},
{
"specversion": "1.0",
"id": "845f6aca-e52a-11ed-9976-d8bbc1a4a0c6",
"source": "telegraf",
"type": "com.influxdata.telegraf.metric",
"datacontenttype": "application/json",
"time": "2023-04-27T16:30:51.000000012Z",
"data": {
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 100,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0,
"usage_user": 0
},
"name": "cpu",
"tags": {
"cpu": "cpu12",
"host": "Hugin"
},
"timestamp": 1682613051000000012
}
},
{
"specversion": "1.0",
"id": "845f6aca-e52a-11ed-9976-d8bbc1a4a0c6",
"source": "telegraf",
"type": "com.influxdata.telegraf.metric",
"datacontenttype": "application/json",
"time": "2023-04-27T16:30:51.000000013Z",
"data": {
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 100,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0,
"usage_user": 0
},
"name": "cpu",
"tags": {
"cpu": "cpu13",
"host": "Hugin"
},
"timestamp": 1682613051000000013
}
},
{
"specversion": "1.0",
"id": "845f6aca-e52a-11ed-9976-d8bbc1a4a0c6",
"source": "telegraf",
"type": "com.influxdata.telegraf.metric",
"datacontenttype": "application/json",
"time": "2023-04-27T16:30:51.000000014Z",
"data": {
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 100,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0,
"usage_user": 0
},
"name": "cpu",
"tags": {
"cpu": "cpu14",
"host": "Hugin"
},
"timestamp": 1682613051000000014
}
},
{
"specversion": "1.0",
"id": "845f6aca-e52a-11ed-9976-d8bbc1a4a0c6",
"source": "telegraf",
"type": "com.influxdata.telegraf.metric",
"datacontenttype": "application/json",
"time": "2023-04-27T16:30:51.000000015Z",
"data": {
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 100,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0,
"usage_user": 0
},
"name": "cpu",
"tags": {
"cpu": "cpu15",
"host": "Hugin"
},
"timestamp": 1682613051000000015
}
},
{
"specversion": "1.0",
"id": "845f6aca-e52a-11ed-9976-d8bbc1a4a0c6",
"source": "telegraf",
"type": "com.influxdata.telegraf.metric",
"datacontenttype": "application/json",
"time": "2023-04-27T16:30:51.000000999Z",
"data": {
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 99.62546816517232,
"usage_iowait": 0,
"usage_irq": 0.12484394506911513,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0.12484394506840547,
"usage_user": 0.12484394507124409
},
"name": "cpu",
"tags": {
"cpu": "cpu-total",
"host": "Hugin"
},
"timestamp": 1682613051000000999
}
}
]

View file

@ -0,0 +1,17 @@
cpu,cpu=cpu0,host=Hugin usage_softirq=0,usage_steal=0,usage_guest=0,usage_user=0,usage_idle=100,usage_nice=0,usage_iowait=0,usage_system=0,usage_irq=0,usage_guest_nice=0 1682613051000000000
cpu,cpu=cpu1,host=Hugin usage_user=0,usage_idle=100,usage_softirq=0,usage_steal=0,usage_guest_nice=0,usage_system=0,usage_nice=0,usage_iowait=0,usage_irq=0,usage_guest=0 1682613051000000001
cpu,cpu=cpu2,host=Hugin usage_system=0,usage_idle=100,usage_softirq=0,usage_steal=0,usage_guest=0,usage_guest_nice=0,usage_user=0,usage_nice=0,usage_iowait=0,usage_irq=0 1682613051000000002
cpu,cpu=cpu3,host=Hugin usage_idle=100,usage_nice=0,usage_iowait=0,usage_irq=0,usage_user=0,usage_system=0,usage_softirq=0,usage_steal=0,usage_guest=0,usage_guest_nice=0 1682613051000000003
cpu,cpu=cpu4,host=Hugin usage_user=0,usage_steal=0,usage_system=0,usage_idle=100,usage_nice=0,usage_iowait=0,usage_irq=0,usage_softirq=0,usage_guest=0,usage_guest_nice=0 1682613051000000004
cpu,cpu=cpu5,host=Hugin usage_user=0,usage_nice=0,usage_irq=0,usage_softirq=0,usage_guest_nice=0,usage_system=0,usage_idle=100,usage_iowait=0,usage_steal=0,usage_guest=0 1682613051000000005
cpu,cpu=cpu6,host=Hugin usage_user=0,usage_system=0,usage_iowait=0,usage_softirq=0,usage_steal=0,usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_nice=0,usage_irq=0 1682613051000000006
cpu,cpu=cpu7,host=Hugin usage_system=0,usage_iowait=0,usage_softirq=0,usage_user=0,usage_nice=0,usage_irq=0,usage_steal=0,usage_guest=0,usage_guest_nice=0,usage_idle=100 1682613051000000007
cpu,cpu=cpu8,host=Hugin usage_guest_nice=0,usage_user=0,usage_iowait=0,usage_irq=0,usage_softirq=0,usage_guest=0,usage_system=0,usage_idle=100,usage_nice=0,usage_steal=0 1682613051000000008
cpu,cpu=cpu9,host=Hugin usage_user=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_guest_nice=0,usage_system=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_guest=0 1682613051000000009
cpu,cpu=cpu10,host=Hugin usage_softirq=0,usage_steal=0,usage_guest=0,usage_nice=0,usage_system=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_guest_nice=0,usage_user=0 1682613051000000010
cpu,cpu=cpu11,host=Hugin usage_guest=0,usage_guest_nice=0,usage_user=0,usage_idle=100,usage_nice=0,usage_iowait=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_irq=0 1682613051000000011
cpu,cpu=cpu12,host=Hugin usage_softirq=0,usage_steal=0,usage_guest_nice=0,usage_user=0,usage_system=0,usage_nice=0,usage_irq=0,usage_idle=100,usage_iowait=0,usage_guest=0 1682613051000000012
cpu,cpu=cpu13,host=Hugin usage_iowait=0,usage_guest=0,usage_guest_nice=0,usage_steal=0,usage_user=0,usage_system=0,usage_idle=100,usage_nice=0,usage_irq=0,usage_softirq=0 1682613051000000013
cpu,cpu=cpu14,host=Hugin usage_system=0,usage_idle=100,usage_softirq=0,usage_steal=0,usage_guest=0,usage_user=0,usage_nice=0,usage_iowait=0,usage_irq=0,usage_guest_nice=0 1682613051000000014
cpu,cpu=cpu15,host=Hugin usage_user=0,usage_idle=100,usage_steal=0,usage_irq=0,usage_softirq=0,usage_guest=0,usage_guest_nice=0,usage_system=0,usage_nice=0,usage_iowait=0 1682613051000000015
cpu,cpu=cpu-total,host=Hugin usage_idle=99.62546816517232,usage_irq=0.12484394506911513,usage_softirq=0,usage_guest_nice=0,usage_steal=0,usage_guest=0,usage_user=0.12484394507124409,usage_system=0.12484394506840547,usage_nice=0,usage_iowait=0 1682613051000000999

View file

@ -0,0 +1,3 @@
[[outputs.dummy]]
batch = true
data_format = "cloudevents"

View file

@ -0,0 +1,112 @@
[
{
"specversion": "1.0",
"id": "845f6aca-e52a-11ed-9976-d8bbc1a4a0c6",
"source": "telegraf",
"type": "com.influxdata.telegraf.metrics",
"datacontenttype": "application/json",
"time": "2023-04-27T16:30:51.000000999Z",
"data": [
{
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 100,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0,
"usage_user": 0
},
"name": "cpu",
"tags": {
"cpu": "cpu0",
"host": "Hugin"
},
"timestamp": 1682613051000000000
},
{
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 100,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0,
"usage_user": 0
},
"name": "cpu",
"tags": {
"cpu": "cpu1",
"host": "Hugin"
},
"timestamp": 1682613051000000001
},
{
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 100,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0,
"usage_user": 0
},
"name": "cpu",
"tags": {
"cpu": "cpu2",
"host": "Hugin"
},
"timestamp": 1682613051000000002
},
{
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 100,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0,
"usage_user": 0
},
"name": "cpu",
"tags": {
"cpu": "cpu3",
"host": "Hugin"
},
"timestamp": 1682613051000000003
},
{
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 99.62546816517232,
"usage_iowait": 0,
"usage_irq": 0.12484394506911513,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0.12484394506840547,
"usage_user": 0.12484394507124409
},
"name": "cpu",
"tags": {
"cpu": "cpu-total",
"host": "Hugin"
},
"timestamp": 1682613051000000999
}
]
}
]

View file

@ -0,0 +1,5 @@
cpu,cpu=cpu0,host=Hugin usage_softirq=0,usage_steal=0,usage_guest=0,usage_user=0,usage_idle=100,usage_nice=0,usage_iowait=0,usage_system=0,usage_irq=0,usage_guest_nice=0 1682613051000000000
cpu,cpu=cpu1,host=Hugin usage_user=0,usage_idle=100,usage_softirq=0,usage_steal=0,usage_guest_nice=0,usage_system=0,usage_nice=0,usage_iowait=0,usage_irq=0,usage_guest=0 1682613051000000001
cpu,cpu=cpu2,host=Hugin usage_system=0,usage_idle=100,usage_softirq=0,usage_steal=0,usage_guest=0,usage_guest_nice=0,usage_user=0,usage_nice=0,usage_iowait=0,usage_irq=0 1682613051000000002
cpu,cpu=cpu3,host=Hugin usage_idle=100,usage_nice=0,usage_iowait=0,usage_irq=0,usage_user=0,usage_system=0,usage_softirq=0,usage_steal=0,usage_guest=0,usage_guest_nice=0 1682613051000000003
cpu,cpu=cpu-total,host=Hugin usage_idle=99.62546816517232,usage_irq=0.12484394506911513,usage_softirq=0,usage_guest_nice=0,usage_steal=0,usage_guest=0,usage_user=0.12484394507124409,usage_system=0.12484394506840547,usage_nice=0,usage_iowait=0 1682613051000000999

View file

@ -0,0 +1,5 @@
[[outputs.dummy]]
batch = true
data_format = "cloudevents"
cloudevents_batch_format = "metrics"

View file

@ -0,0 +1,79 @@
{
"$ref": "#/definitions/event",
"definitions": {
"specversion": {
"type": "string",
"minLength": 1
},
"datacontenttype": {
"type": "string"
},
"data": {
"type": ["object", "string"]
},
"event": {
"properties": {
"specversion": {
"$ref": "#/definitions/specversion"
},
"datacontenttype": {
"$ref": "#/definitions/datacontenttype"
},
"data": {
"$ref": "#/definitions/data"
},
"id": {
"$ref": "#/definitions/id"
},
"time": {
"$ref": "#/definitions/time"
},
"schemaurl": {
"$ref": "#/definitions/schemaurl"
},
"subject": {
"$ref": "#/definitions/subject"
},
"type": {
"$ref": "#/definitions/type"
},
"extensions": {
"$ref": "#/definitions/extensions"
},
"source": {
"$ref": "#/definitions/source"
}
},
"required": ["specversion", "id", "type", "source"],
"type": "object"
},
"id": {
"type": "string",
"minLength": 1
},
"time": {
"format": "date-time",
"type": "string"
},
"schemaurl": {
"type": "string",
"format": "uri-reference"
},
"subject": {
"type": "string",
"minLength": 1
},
"type": {
"type": "string",
"minLength": 1
},
"extensions": {
"type": "object"
},
"source": {
"format": "uri-reference",
"type": "string"
}
},
"type": "object"
}

View file

@ -0,0 +1,128 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"description": "CloudEvents Specification JSON Schema",
"type": "object",
"properties": {
"id": {
"description": "Identifies the event.",
"$ref": "#/definitions/iddef",
"examples": [
"A234-1234-1234"
]
},
"source": {
"description": "Identifies the context in which an event happened.",
"$ref": "#/definitions/sourcedef",
"examples" : [
"https://github.com/cloudevents",
"mailto:cncf-wg-serverless@lists.cncf.io",
"urn:uuid:6e8bc430-9c3a-11d9-9669-0800200c9a66",
"cloudevents/spec/pull/123",
"/sensors/tn-1234567/alerts",
"1-555-123-4567"
]
},
"specversion": {
"description": "The version of the CloudEvents specification which the event uses.",
"$ref": "#/definitions/specversiondef",
"examples": [
"1.0"
]
},
"type": {
"description": "Describes the type of event related to the originating occurrence.",
"$ref": "#/definitions/typedef",
"examples" : [
"com.github.pull_request.opened",
"com.example.object.deleted.v2"
]
},
"datacontenttype": {
"description": "Content type of the data value. Must adhere to RFC 2046 format.",
"$ref": "#/definitions/datacontenttypedef",
"examples": [
"text/xml",
"application/json",
"image/png",
"multipart/form-data"
]
},
"dataschema": {
"description": "Identifies the schema that data adheres to.",
"$ref": "#/definitions/dataschemadef"
},
"subject": {
"description": "Describes the subject of the event in the context of the event producer (identified by source).",
"$ref": "#/definitions/subjectdef",
"examples": [
"mynewfile.jpg"
]
},
"time": {
"description": "Timestamp of when the occurrence happened. Must adhere to RFC 3339.",
"$ref": "#/definitions/timedef",
"examples": [
"2018-04-05T17:31:00Z"
]
},
"data": {
"description": "The event payload.",
"$ref": "#/definitions/datadef",
"examples": [
"<much wow=\"xml\"/>"
]
},
"data_base64": {
"description": "Base64 encoded event payload. Must adhere to RFC4648.",
"$ref": "#/definitions/data_base64def",
"examples": [
"Zm9vYg=="
]
}
},
"required": ["id", "source", "specversion", "type"],
"definitions": {
"iddef": {
"type": "string",
"minLength": 1
},
"sourcedef": {
"type": "string",
"format": "uri-reference",
"minLength": 1
},
"specversiondef": {
"type": "string",
"minLength": 1
},
"typedef": {
"type": "string",
"minLength": 1
},
"datacontenttypedef": {
"type": ["string", "null"],
"minLength": 1
},
"dataschemadef": {
"type": ["string", "null"],
"format": "uri",
"minLength": 1
},
"subjectdef": {
"type": ["string", "null"],
"minLength": 1
},
"timedef": {
"type": ["string", "null"],
"format": "date-time",
"minLength": 1
},
"datadef": {
"type": ["object", "string", "number", "array", "boolean", "null"]
},
"data_base64def": {
"type": ["string", "null"],
"contentEncoding": "base64"
}
}
}

View file

@ -0,0 +1,478 @@
[
{
"specversion": "1.0",
"id": "845f6aca-e52a-11ed-9976-d8bbc1a4a0c6",
"source": "telegraf",
"type": "com.influxdata.telegraf.metric",
"datacontenttype": "application/json",
"time": "2023-04-27T16:30:51Z",
"data": {
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 100,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0,
"usage_user": 0
},
"name": "cpu",
"tags": {
"cpu": "cpu0",
"host": "Hugin"
},
"timestamp": 1682613051000000000
}
},
{
"specversion": "1.0",
"id": "845f6aca-e52a-11ed-9976-d8bbc1a4a0c6",
"source": "telegraf",
"type": "com.influxdata.telegraf.metric",
"datacontenttype": "application/json",
"time": "2023-04-27T16:30:51.000000001Z",
"data": {
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 100,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0,
"usage_user": 0
},
"name": "cpu",
"tags": {
"cpu": "cpu1",
"host": "Hugin"
},
"timestamp": 1682613051000000001
}
},
{
"specversion": "1.0",
"id": "845f6aca-e52a-11ed-9976-d8bbc1a4a0c6",
"source": "telegraf",
"type": "com.influxdata.telegraf.metric",
"datacontenttype": "application/json",
"time": "2023-04-27T16:30:51.000000002Z",
"data": {
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 100,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0,
"usage_user": 0
},
"name": "cpu",
"tags": {
"cpu": "cpu2",
"host": "Hugin"
},
"timestamp": 1682613051000000002
}
},
{
"specversion": "1.0",
"id": "845f6aca-e52a-11ed-9976-d8bbc1a4a0c6",
"source": "telegraf",
"type": "com.influxdata.telegraf.metric",
"datacontenttype": "application/json",
"time": "2023-04-27T16:30:51.000000003Z",
"data": {
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 100,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0,
"usage_user": 0
},
"name": "cpu",
"tags": {
"cpu": "cpu3",
"host": "Hugin"
},
"timestamp": 1682613051000000003
}
},
{
"specversion": "1.0",
"id": "845f6aca-e52a-11ed-9976-d8bbc1a4a0c6",
"source": "telegraf",
"type": "com.influxdata.telegraf.metric",
"datacontenttype": "application/json",
"time": "2023-04-27T16:30:51.000000004Z",
"data": {
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 100,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0,
"usage_user": 0
},
"name": "cpu",
"tags": {
"cpu": "cpu4",
"host": "Hugin"
},
"timestamp": 1682613051000000004
}
},
{
"specversion": "1.0",
"id": "845f6aca-e52a-11ed-9976-d8bbc1a4a0c6",
"source": "telegraf",
"type": "com.influxdata.telegraf.metric",
"datacontenttype": "application/json",
"time": "2023-04-27T16:30:51.000000005Z",
"data": {
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 100,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0,
"usage_user": 0
},
"name": "cpu",
"tags": {
"cpu": "cpu5",
"host": "Hugin"
},
"timestamp": 1682613051000000005
}
},
{
"specversion": "1.0",
"id": "845f6aca-e52a-11ed-9976-d8bbc1a4a0c6",
"source": "telegraf",
"type": "com.influxdata.telegraf.metric",
"datacontenttype": "application/json",
"time": "2023-04-27T16:30:51.000000006Z",
"data": {
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 100,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0,
"usage_user": 0
},
"name": "cpu",
"tags": {
"cpu": "cpu6",
"host": "Hugin"
},
"timestamp": 1682613051000000006
}
},
{
"specversion": "1.0",
"id": "845f6aca-e52a-11ed-9976-d8bbc1a4a0c6",
"source": "telegraf",
"type": "com.influxdata.telegraf.metric",
"datacontenttype": "application/json",
"time": "2023-04-27T16:30:51.000000007Z",
"data": {
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 100,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0,
"usage_user": 0
},
"name": "cpu",
"tags": {
"cpu": "cpu7",
"host": "Hugin"
},
"timestamp": 1682613051000000007
}
},
{
"specversion": "1.0",
"id": "845f6aca-e52a-11ed-9976-d8bbc1a4a0c6",
"source": "telegraf",
"type": "com.influxdata.telegraf.metric",
"datacontenttype": "application/json",
"time": "2023-04-27T16:30:51.000000008Z",
"data": {
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 100,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0,
"usage_user": 0
},
"name": "cpu",
"tags": {
"cpu": "cpu8",
"host": "Hugin"
},
"timestamp": 1682613051000000008
}
},
{
"specversion": "1.0",
"id": "845f6aca-e52a-11ed-9976-d8bbc1a4a0c6",
"source": "telegraf",
"type": "com.influxdata.telegraf.metric",
"datacontenttype": "application/json",
"time": "2023-04-27T16:30:51.000000009Z",
"data": {
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 100,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0,
"usage_user": 0
},
"name": "cpu",
"tags": {
"cpu": "cpu9",
"host": "Hugin"
},
"timestamp": 1682613051000000009
}
},
{
"specversion": "1.0",
"id": "845f6aca-e52a-11ed-9976-d8bbc1a4a0c6",
"source": "telegraf",
"type": "com.influxdata.telegraf.metric",
"datacontenttype": "application/json",
"time": "2023-04-27T16:30:51.00000001Z",
"data": {
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 100,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0,
"usage_user": 0
},
"name": "cpu",
"tags": {
"cpu": "cpu10",
"host": "Hugin"
},
"timestamp": 1682613051000000010
}
},
{
"specversion": "1.0",
"id": "845f6aca-e52a-11ed-9976-d8bbc1a4a0c6",
"source": "telegraf",
"type": "com.influxdata.telegraf.metric",
"datacontenttype": "application/json",
"time": "2023-04-27T16:30:51.000000011Z",
"data": {
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 100,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0,
"usage_user": 0
},
"name": "cpu",
"tags": {
"cpu": "cpu11",
"host": "Hugin"
},
"timestamp": 1682613051000000011
}
},
{
"specversion": "1.0",
"id": "845f6aca-e52a-11ed-9976-d8bbc1a4a0c6",
"source": "telegraf",
"type": "com.influxdata.telegraf.metric",
"datacontenttype": "application/json",
"time": "2023-04-27T16:30:51.000000012Z",
"data": {
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 100,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0,
"usage_user": 0
},
"name": "cpu",
"tags": {
"cpu": "cpu12",
"host": "Hugin"
},
"timestamp": 1682613051000000012
}
},
{
"specversion": "1.0",
"id": "845f6aca-e52a-11ed-9976-d8bbc1a4a0c6",
"source": "telegraf",
"type": "com.influxdata.telegraf.metric",
"datacontenttype": "application/json",
"time": "2023-04-27T16:30:51.000000013Z",
"data": {
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 100,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0,
"usage_user": 0
},
"name": "cpu",
"tags": {
"cpu": "cpu13",
"host": "Hugin"
},
"timestamp": 1682613051000000013
}
},
{
"specversion": "1.0",
"id": "845f6aca-e52a-11ed-9976-d8bbc1a4a0c6",
"source": "telegraf",
"type": "com.influxdata.telegraf.metric",
"datacontenttype": "application/json",
"time": "2023-04-27T16:30:51.000000014Z",
"data": {
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 100,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0,
"usage_user": 0
},
"name": "cpu",
"tags": {
"cpu": "cpu14",
"host": "Hugin"
},
"timestamp": 1682613051000000014
}
},
{
"specversion": "1.0",
"id": "845f6aca-e52a-11ed-9976-d8bbc1a4a0c6",
"source": "telegraf",
"type": "com.influxdata.telegraf.metric",
"datacontenttype": "application/json",
"time": "2023-04-27T16:30:51.000000015Z",
"data": {
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 100,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0,
"usage_user": 0
},
"name": "cpu",
"tags": {
"cpu": "cpu15",
"host": "Hugin"
},
"timestamp": 1682613051000000015
}
},
{
"specversion": "1.0",
"id": "845f6aca-e52a-11ed-9976-d8bbc1a4a0c6",
"source": "telegraf",
"type": "com.influxdata.telegraf.metric",
"datacontenttype": "application/json",
"time": "2023-04-27T16:30:51.000000999Z",
"data": {
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 99.62546816517232,
"usage_iowait": 0,
"usage_irq": 0.12484394506911513,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0.12484394506840547,
"usage_user": 0.12484394507124409
},
"name": "cpu",
"tags": {
"cpu": "cpu-total",
"host": "Hugin"
},
"timestamp": 1682613051000000999
}
}
]

View file

@ -0,0 +1,17 @@
cpu,cpu=cpu0,host=Hugin usage_softirq=0,usage_steal=0,usage_guest=0,usage_user=0,usage_idle=100,usage_nice=0,usage_iowait=0,usage_system=0,usage_irq=0,usage_guest_nice=0 1682613051000000000
cpu,cpu=cpu1,host=Hugin usage_user=0,usage_idle=100,usage_softirq=0,usage_steal=0,usage_guest_nice=0,usage_system=0,usage_nice=0,usage_iowait=0,usage_irq=0,usage_guest=0 1682613051000000001
cpu,cpu=cpu2,host=Hugin usage_system=0,usage_idle=100,usage_softirq=0,usage_steal=0,usage_guest=0,usage_guest_nice=0,usage_user=0,usage_nice=0,usage_iowait=0,usage_irq=0 1682613051000000002
cpu,cpu=cpu3,host=Hugin usage_idle=100,usage_nice=0,usage_iowait=0,usage_irq=0,usage_user=0,usage_system=0,usage_softirq=0,usage_steal=0,usage_guest=0,usage_guest_nice=0 1682613051000000003
cpu,cpu=cpu4,host=Hugin usage_user=0,usage_steal=0,usage_system=0,usage_idle=100,usage_nice=0,usage_iowait=0,usage_irq=0,usage_softirq=0,usage_guest=0,usage_guest_nice=0 1682613051000000004
cpu,cpu=cpu5,host=Hugin usage_user=0,usage_nice=0,usage_irq=0,usage_softirq=0,usage_guest_nice=0,usage_system=0,usage_idle=100,usage_iowait=0,usage_steal=0,usage_guest=0 1682613051000000005
cpu,cpu=cpu6,host=Hugin usage_user=0,usage_system=0,usage_iowait=0,usage_softirq=0,usage_steal=0,usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_nice=0,usage_irq=0 1682613051000000006
cpu,cpu=cpu7,host=Hugin usage_system=0,usage_iowait=0,usage_softirq=0,usage_user=0,usage_nice=0,usage_irq=0,usage_steal=0,usage_guest=0,usage_guest_nice=0,usage_idle=100 1682613051000000007
cpu,cpu=cpu8,host=Hugin usage_guest_nice=0,usage_user=0,usage_iowait=0,usage_irq=0,usage_softirq=0,usage_guest=0,usage_system=0,usage_idle=100,usage_nice=0,usage_steal=0 1682613051000000008
cpu,cpu=cpu9,host=Hugin usage_user=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_guest_nice=0,usage_system=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_guest=0 1682613051000000009
cpu,cpu=cpu10,host=Hugin usage_softirq=0,usage_steal=0,usage_guest=0,usage_nice=0,usage_system=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_guest_nice=0,usage_user=0 1682613051000000010
cpu,cpu=cpu11,host=Hugin usage_guest=0,usage_guest_nice=0,usage_user=0,usage_idle=100,usage_nice=0,usage_iowait=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_irq=0 1682613051000000011
cpu,cpu=cpu12,host=Hugin usage_softirq=0,usage_steal=0,usage_guest_nice=0,usage_user=0,usage_system=0,usage_nice=0,usage_irq=0,usage_idle=100,usage_iowait=0,usage_guest=0 1682613051000000012
cpu,cpu=cpu13,host=Hugin usage_iowait=0,usage_guest=0,usage_guest_nice=0,usage_steal=0,usage_user=0,usage_system=0,usage_idle=100,usage_nice=0,usage_irq=0,usage_softirq=0 1682613051000000013
cpu,cpu=cpu14,host=Hugin usage_system=0,usage_idle=100,usage_softirq=0,usage_steal=0,usage_guest=0,usage_user=0,usage_nice=0,usage_iowait=0,usage_irq=0,usage_guest_nice=0 1682613051000000014
cpu,cpu=cpu15,host=Hugin usage_user=0,usage_idle=100,usage_steal=0,usage_irq=0,usage_softirq=0,usage_guest=0,usage_guest_nice=0,usage_system=0,usage_nice=0,usage_iowait=0 1682613051000000015
cpu,cpu=cpu-total,host=Hugin usage_idle=99.62546816517232,usage_irq=0.12484394506911513,usage_softirq=0,usage_guest_nice=0,usage_steal=0,usage_guest=0,usage_user=0.12484394507124409,usage_system=0.12484394506840547,usage_nice=0,usage_iowait=0 1682613051000000999

View file

@ -0,0 +1,2 @@
[[outputs.dummy]]
data_format = "cloudevents"

View file

@ -0,0 +1,30 @@
[
{
"specversion": "1.0",
"id": "845f6aca-e52a-11ed-9976-d8bbc1a4a0c6",
"source": "myownsource",
"type": "com.influxdata.telegraf.metric",
"datacontenttype": "application/json",
"data": {
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 99.62546816517232,
"usage_iowait": 0,
"usage_irq": 0.12484394506911513,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0.12484394506840547,
"usage_user": 0.12484394507124409
},
"name": "cpu",
"tags": {
"cpu": "cpu-total",
"host": "Hugin"
},
"timestamp": 1682613051000000000
},
"time": "2023-04-27T16:30:51Z"
}
]

View file

@ -0,0 +1 @@
cpu,cpu=cpu-total,host=Hugin usage_idle=99.62546816517232,usage_irq=0.12484394506911513,usage_softirq=0,usage_guest_nice=0,usage_steal=0,usage_guest=0,usage_user=0.12484394507124409,usage_system=0.12484394506840547,usage_nice=0,usage_iowait=0 1682613051000000000

View file

@ -0,0 +1,3 @@
[[outputs.dummy]]
data_format = "cloudevents"
cloudevents_source = "myownsource"

View file

@ -0,0 +1,142 @@
[
{
"specversion": "1.0",
"id": "845f6aca-e52a-11ed-9976-d8bbc1a4a0c6",
"source": "cpu0",
"type": "com.influxdata.telegraf.metric",
"datacontenttype": "application/json",
"time": "2023-04-27T16:30:51Z",
"data": {
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 100,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0,
"usage_user": 0
},
"name": "cpu",
"tags": {
"cpu": "cpu0",
"host": "Hugin"
},
"timestamp": 1682613051000000000
}
},
{
"specversion": "1.0",
"id": "845f6aca-e52a-11ed-9976-d8bbc1a4a0c6",
"source": "cpu1",
"type": "com.influxdata.telegraf.metric",
"datacontenttype": "application/json",
"time": "2023-04-27T16:30:51.000000001Z",
"data": {
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 100,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0,
"usage_user": 0
},
"name": "cpu",
"tags": {
"cpu": "cpu1",
"host": "Hugin"
},
"timestamp": 1682613051000000001
}
},
{
"specversion": "1.0",
"id": "845f6aca-e52a-11ed-9976-d8bbc1a4a0c6",
"source": "cpu2",
"type": "com.influxdata.telegraf.metric",
"datacontenttype": "application/json",
"time": "2023-04-27T16:30:51.000000002Z",
"data": {
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 100,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0,
"usage_user": 0
},
"name": "cpu",
"tags": {
"cpu": "cpu2",
"host": "Hugin"
},
"timestamp": 1682613051000000002
}
},
{
"specversion": "1.0",
"id": "845f6aca-e52a-11ed-9976-d8bbc1a4a0c6",
"source": "cpu3",
"type": "com.influxdata.telegraf.metric",
"datacontenttype": "application/json",
"time": "2023-04-27T16:30:51.000000003Z",
"data": {
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 100,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0,
"usage_user": 0
},
"name": "cpu",
"tags": {
"cpu": "cpu3",
"host": "Hugin"
},
"timestamp": 1682613051000000003
}
},
{
"specversion": "1.0",
"id": "845f6aca-e52a-11ed-9976-d8bbc1a4a0c6",
"source": "cpu-total",
"type": "com.influxdata.telegraf.metric",
"datacontenttype": "application/json",
"time": "2023-04-27T16:30:51.000000999Z",
"data": {
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 99.62546816517232,
"usage_iowait": 0,
"usage_irq": 0.12484394506911513,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0.12484394506840547,
"usage_user": 0.12484394507124409
},
"name": "cpu",
"tags": {
"cpu": "cpu-total",
"host": "Hugin"
},
"timestamp": 1682613051000000999
}
}
]

View file

@ -0,0 +1,5 @@
cpu,cpu=cpu0,host=Hugin usage_softirq=0,usage_steal=0,usage_guest=0,usage_user=0,usage_idle=100,usage_nice=0,usage_iowait=0,usage_system=0,usage_irq=0,usage_guest_nice=0 1682613051000000000
cpu,cpu=cpu1,host=Hugin usage_user=0,usage_idle=100,usage_softirq=0,usage_steal=0,usage_guest_nice=0,usage_system=0,usage_nice=0,usage_iowait=0,usage_irq=0,usage_guest=0 1682613051000000001
cpu,cpu=cpu2,host=Hugin usage_system=0,usage_idle=100,usage_softirq=0,usage_steal=0,usage_guest=0,usage_guest_nice=0,usage_user=0,usage_nice=0,usage_iowait=0,usage_irq=0 1682613051000000002
cpu,cpu=cpu3,host=Hugin usage_idle=100,usage_nice=0,usage_iowait=0,usage_irq=0,usage_user=0,usage_system=0,usage_softirq=0,usage_steal=0,usage_guest=0,usage_guest_nice=0 1682613051000000003
cpu,cpu=cpu-total,host=Hugin usage_idle=99.62546816517232,usage_irq=0.12484394506911513,usage_softirq=0,usage_guest_nice=0,usage_steal=0,usage_guest=0,usage_user=0.12484394507124409,usage_system=0.12484394506840547,usage_nice=0,usage_iowait=0 1682613051000000999

View file

@ -0,0 +1,3 @@
[[outputs.dummy]]
data_format = "cloudevents"
cloudevents_source_tag = "cpu"

View file

@ -0,0 +1,30 @@
[
{
"specversion": "1.0",
"id": "845f6aca-e52a-11ed-9976-d8bbc1a4a0c6",
"source": "telegraf",
"type": "com.influxdata.telegraf.metric",
"datacontenttype": "application/json",
"data": {
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 99.62546816517232,
"usage_iowait": 0,
"usage_irq": 0.12484394506911513,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0.12484394506840547,
"usage_user": 0.12484394507124409
},
"name": "cpu",
"tags": {
"cpu": "cpu-total",
"host": "Hugin"
},
"timestamp": 1682613051000000000
},
"time": "2023-04-27T16:30:51Z"
}
]

View file

@ -0,0 +1 @@
cpu,cpu=cpu-total,host=Hugin usage_idle=99.62546816517232,usage_irq=0.12484394506911513,usage_softirq=0,usage_guest_nice=0,usage_steal=0,usage_guest=0,usage_user=0.12484394507124409,usage_system=0.12484394506840547,usage_nice=0,usage_iowait=0 1682613051000000000

View file

@ -0,0 +1,2 @@
[[outputs.dummy]]
data_format = "cloudevents"

View file

@ -0,0 +1,69 @@
# CSV Serializer
The `csv` output data format converts metrics into CSV lines.
## Configuration
```toml
[[outputs.file]]
## Files to write to, "stdout" is a specially handled file.
files = ["stdout", "/tmp/metrics.out"]
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "csv"
## The default timestamp format is Unix epoch time.
## Other timestamp layouts can be configured using the Go time layout
## specification from https://golang.org/pkg/time/#Time.Format
## e.g.: csv_timestamp_format = "2006-01-02T15:04:05Z07:00"
# csv_timestamp_format = "unix"
## The default separator for the CSV format.
# csv_separator = ","
## Output the CSV header in the first line.
## Enable the header when outputting metrics to a new file.
## Disable when appending to a file or when using a stateless
## output to prevent headers appearing between data lines.
# csv_header = false
## Prefix tag and field columns with "tag_" and "field_" respectively.
## This can be helpful if you need to know the "type" of a column.
# csv_column_prefix = false
## Use the specified order for the columns.
## This can be helpful if you need a specific output order. To specify tags,
## use a `tag.` prefix, for fields use a `field.` prefix and use `name` and
## `timestamp` to reference the measurement name and timestamp respectively.
## NOTE: The output will only contain the specified tags, fields, etc. All
## other data will be dropped. In case a tag or field does not exist,
## the column will be empty.
## ex. csv_columns = ["timestamp", "tag.host", "field.value"]
##
## By default all metric data will be written in the order:
## timestamp, name, tags..., fields...
## with tags and fields being ordered alphabetically.
# csv_columns = []
```
## Examples
Standard form:
```csv
1458229140,docker,raynor,30,4,...,59,660
```
When an output plugin needs to emit multiple metrics at one time, it may use
the batch format. Whether the batch format is used is determined by the
plugin; refer to the documentation of the specific plugin. With
`csv_header = true` you get
```csv
timestamp,measurement,host,field_1,field_2,...,field_N,n_images
1458229140,docker,raynor,30,4,...,59,660
1458229143,docker,raynor,28,5,...,60,665
```
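For ordered output, `csv_columns` can be combined with `csv_header`. This is
a minimal sketch based on the options above; the metric (fields
`temperature` and `pressure`, tag `machine`) and its values are illustrative:
```toml
[[outputs.file]]
files = ["stdout"]
data_format = "csv"
csv_header = true
csv_columns = ["timestamp", "field.temperature", "field.pressure", "tag.machine"]
```
```csv
timestamp,temperature,pressure,machine
1653643420,23.7,987.5,A1
```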

View file

@ -0,0 +1,245 @@
package csv
import (
"bytes"
"encoding/csv"
"fmt"
"runtime"
"sort"
"strconv"
"strings"
"time"
"unicode/utf8"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/serializers"
)
type Serializer struct {
TimestampFormat string `toml:"csv_timestamp_format"`
Separator string `toml:"csv_separator"`
Header bool `toml:"csv_header"`
Prefix bool `toml:"csv_column_prefix"`
Columns []string `toml:"csv_columns"`
buffer bytes.Buffer
writer *csv.Writer
}
func (s *Serializer) Init() error {
// Setting defaults
if s.Separator == "" {
s.Separator = ","
}
// Check inputs
if len(s.Separator) > 1 {
return fmt.Errorf("invalid separator %q", s.Separator)
}
switch s.TimestampFormat {
case "":
s.TimestampFormat = "unix"
case "unix", "unix_ms", "unix_us", "unix_ns":
default:
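// Heuristic layout check: formatting the current time with a valid Go
// reference layout changes the string, so a layout that comes back
// unchanged contains no time directives and is rejected.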
if time.Now().Format(s.TimestampFormat) == s.TimestampFormat {
return fmt.Errorf("invalid timestamp format %q", s.TimestampFormat)
}
}
// Check columns if any
for _, name := range s.Columns {
switch {
case name == "timestamp", name == "name",
strings.HasPrefix(name, "tag."),
strings.HasPrefix(name, "field."):
default:
return fmt.Errorf("invalid column reference %q", name)
}
}
// Initialize the writer
s.writer = csv.NewWriter(&s.buffer)
s.writer.Comma, _ = utf8.DecodeRuneInString(s.Separator)
s.writer.UseCRLF = runtime.GOOS == "windows"
return nil
}
func (s *Serializer) Serialize(metric telegraf.Metric) ([]byte, error) {
return s.SerializeBatch([]telegraf.Metric{metric})
}
func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) {
if len(metrics) < 1 {
return nil, nil
}
// Clear the buffer
s.buffer.Truncate(0)
// Write the header if the user wants us to
if s.Header {
if len(s.Columns) > 0 {
if err := s.writeHeaderOrdered(); err != nil {
return nil, fmt.Errorf("writing header failed: %w", err)
}
} else {
if err := s.writeHeader(metrics[0]); err != nil {
return nil, fmt.Errorf("writing header failed: %w", err)
}
}
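// Emit the header at most once, even across multiple batches.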
s.Header = false
}
for _, m := range metrics {
if len(s.Columns) > 0 {
if err := s.writeDataOrdered(m); err != nil {
return nil, fmt.Errorf("writing data failed: %w", err)
}
} else {
if err := s.writeData(m); err != nil {
return nil, fmt.Errorf("writing data failed: %w", err)
}
}
}
// Finish up
s.writer.Flush()
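// Note: the returned slice aliases the internal buffer and is only valid
// until the next call to Serialize or SerializeBatch.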
return s.buffer.Bytes(), nil
}
func (s *Serializer) writeHeader(metric telegraf.Metric) error {
columns := []string{
"timestamp",
"measurement",
}
for _, tag := range metric.TagList() {
if s.Prefix {
columns = append(columns, "tag_"+tag.Key)
} else {
columns = append(columns, tag.Key)
}
}
// Sort the fields by name
sort.Slice(metric.FieldList(), func(i, j int) bool {
return metric.FieldList()[i].Key < metric.FieldList()[j].Key
})
for _, field := range metric.FieldList() {
if s.Prefix {
columns = append(columns, "field_"+field.Key)
} else {
columns = append(columns, field.Key)
}
}
return s.writer.Write(columns)
}
func (s *Serializer) writeHeaderOrdered() error {
columns := make([]string, 0, len(s.Columns))
for _, name := range s.Columns {
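// With prefixes enabled "tag.machine" becomes "tag_machine"; otherwise the
// "tag." / "field." prefix is stripped from the column name.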
if s.Prefix {
name = strings.ReplaceAll(name, ".", "_")
} else {
name = strings.TrimPrefix(name, "tag.")
name = strings.TrimPrefix(name, "field.")
}
columns = append(columns, name)
}
return s.writer.Write(columns)
}
func (s *Serializer) writeData(metric telegraf.Metric) error {
var timestamp string
// Format the time
switch s.TimestampFormat {
case "unix":
timestamp = strconv.FormatInt(metric.Time().Unix(), 10)
case "unix_ms":
timestamp = strconv.FormatInt(metric.Time().UnixNano()/1_000_000, 10)
case "unix_us":
timestamp = strconv.FormatInt(metric.Time().UnixNano()/1_000, 10)
case "unix_ns":
timestamp = strconv.FormatInt(metric.Time().UnixNano(), 10)
default:
timestamp = metric.Time().UTC().Format(s.TimestampFormat)
}
columns := []string{
timestamp,
metric.Name(),
}
for _, tag := range metric.TagList() {
columns = append(columns, tag.Value)
}
// Sort the fields by name
sort.Slice(metric.FieldList(), func(i, j int) bool {
return metric.FieldList()[i].Key < metric.FieldList()[j].Key
})
for _, field := range metric.FieldList() {
v, err := internal.ToString(field.Value)
if err != nil {
return fmt.Errorf("converting field %q to string failed: %w", field.Key, err)
}
columns = append(columns, v)
}
return s.writer.Write(columns)
}
func (s *Serializer) writeDataOrdered(metric telegraf.Metric) error {
var timestamp string
// Format the time
switch s.TimestampFormat {
case "unix":
timestamp = strconv.FormatInt(metric.Time().Unix(), 10)
case "unix_ms":
timestamp = strconv.FormatInt(metric.Time().UnixNano()/1_000_000, 10)
case "unix_us":
timestamp = strconv.FormatInt(metric.Time().UnixNano()/1_000, 10)
case "unix_ns":
timestamp = strconv.FormatInt(metric.Time().UnixNano(), 10)
default:
timestamp = metric.Time().UTC().Format(s.TimestampFormat)
}
columns := make([]string, 0, len(s.Columns))
for _, name := range s.Columns {
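// Columns referencing a tag or field that is missing from the metric are
// left empty, matching the documented csv_columns behavior.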
switch {
case name == "timestamp":
columns = append(columns, timestamp)
case name == "name":
columns = append(columns, metric.Name())
case strings.HasPrefix(name, "tag."):
v, _ := metric.GetTag(strings.TrimPrefix(name, "tag."))
columns = append(columns, v)
case strings.HasPrefix(name, "field."):
var v string
field := strings.TrimPrefix(name, "field.")
if raw, ok := metric.GetField(field); ok {
var err error
v, err = internal.ToString(raw)
if err != nil {
return fmt.Errorf("converting field %q to string failed: %w", field, err)
}
}
columns = append(columns, v)
}
}
return s.writer.Write(columns)
}
func init() {
serializers.Add("csv",
func() telegraf.Serializer {
return &Serializer{}
},
)
}

View file

@ -0,0 +1,261 @@
package csv
import (
"bytes"
"os"
"path/filepath"
"strings"
"testing"
"github.com/influxdata/toml"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf/plugins/parsers/influx"
"github.com/influxdata/telegraf/plugins/serializers"
"github.com/influxdata/telegraf/testutil"
)
func TestInvalidTimestampFormat(t *testing.T) {
s := Serializer{
TimestampFormat: "garbage",
}
require.EqualError(t, s.Init(), `invalid timestamp format "garbage"`)
}
func TestInvalidSeparator(t *testing.T) {
s := Serializer{
Separator: "garbage",
}
require.EqualError(t, s.Init(), `invalid separator "garbage"`)
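// A one-byte separator like "\n" passes the Init length check but is
// rejected by csv.Writer at serialization time.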
s = Serializer{
Separator: "\n",
}
require.NoError(t, s.Init())
_, err := s.Serialize(testutil.TestMetric(42.3, "test"))
require.EqualError(t, err, "writing data failed: csv: invalid field or comment delimiter")
}
func TestSerializeTransformationNonBatch(t *testing.T) {
var tests = []struct {
name string
filename string
}{
{
name: "basic",
filename: "testcases/basic.conf",
},
{
name: "unix nanoseconds timestamp",
filename: "testcases/nanoseconds.conf",
},
{
name: "header",
filename: "testcases/header.conf",
},
{
name: "header with prefix",
filename: "testcases/prefix.conf",
},
{
name: "header and RFC3339 timestamp",
filename: "testcases/rfc3339.conf",
},
{
name: "header and semicolon",
filename: "testcases/semicolon.conf",
},
{
name: "ordered without header",
filename: "testcases/ordered.conf",
},
{
name: "ordered with header",
filename: "testcases/ordered_with_header.conf",
},
{
name: "ordered with header and prefix",
filename: "testcases/ordered_with_header_prefix.conf",
},
{
name: "ordered non-existing fields and tags",
filename: "testcases/ordered_not_exist.conf",
},
}
parser := &influx.Parser{}
require.NoError(t, parser.Init())
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
filename := filepath.FromSlash(tt.filename)
cfg, header, err := loadTestConfiguration(filename)
require.NoError(t, err)
// Get the input metrics
metrics, err := testutil.ParseMetricsFrom(header, "Input:", parser)
require.NoError(t, err)
// Get the expectations
expectedFn, err := testutil.ParseRawLinesFrom(header, "Output File:")
require.NoError(t, err)
require.Len(t, expectedFn, 1, "only a single output file is supported")
expected, err := loadCSV(expectedFn[0])
require.NoError(t, err)
// Serialize
serializer := Serializer{
TimestampFormat: cfg.TimestampFormat,
Separator: cfg.Separator,
Header: cfg.Header,
Prefix: cfg.Prefix,
Columns: cfg.Columns,
}
require.NoError(t, serializer.Init())
// expected results use LF endings
serializer.writer.UseCRLF = false
var actual bytes.Buffer
for _, m := range metrics {
buf, err := serializer.Serialize(m)
require.NoError(t, err)
_, err = actual.ReadFrom(bytes.NewReader(buf))
require.NoError(t, err)
}
// Compare
require.EqualValues(t, string(expected), actual.String())
})
}
}
func TestSerializeTransformationBatch(t *testing.T) {
var tests = []struct {
name string
filename string
}{
{
name: "basic",
filename: "testcases/basic.conf",
},
{
name: "unix nanoseconds timestamp",
filename: "testcases/nanoseconds.conf",
},
{
name: "header",
filename: "testcases/header.conf",
},
{
name: "header with prefix",
filename: "testcases/prefix.conf",
},
{
name: "header and RFC3339 timestamp",
filename: "testcases/rfc3339.conf",
},
{
name: "header and semicolon",
filename: "testcases/semicolon.conf",
},
{
name: "ordered without header",
filename: "testcases/ordered.conf",
},
{
name: "ordered with header",
filename: "testcases/ordered_with_header.conf",
},
{
name: "ordered with header and prefix",
filename: "testcases/ordered_with_header_prefix.conf",
},
{
name: "ordered non-existing fields and tags",
filename: "testcases/ordered_not_exist.conf",
},
}
parser := &influx.Parser{}
require.NoError(t, parser.Init())
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
filename := filepath.FromSlash(tt.filename)
cfg, header, err := loadTestConfiguration(filename)
require.NoError(t, err)
// Get the input metrics
metrics, err := testutil.ParseMetricsFrom(header, "Input:", parser)
require.NoError(t, err)
// Get the expectations
expectedFn, err := testutil.ParseRawLinesFrom(header, "Output File:")
require.NoError(t, err)
require.Len(t, expectedFn, 1, "only a single output file is supported")
expected, err := loadCSV(expectedFn[0])
require.NoError(t, err)
// Serialize
serializer := Serializer{
TimestampFormat: cfg.TimestampFormat,
Separator: cfg.Separator,
Header: cfg.Header,
Prefix: cfg.Prefix,
Columns: cfg.Columns,
}
require.NoError(t, serializer.Init())
// expected results use LF endings
serializer.writer.UseCRLF = false
actual, err := serializer.SerializeBatch(metrics)
require.NoError(t, err)
// Compare
require.EqualValues(t, string(expected), string(actual))
})
}
}
type Config Serializer
func loadTestConfiguration(filename string) (*Config, []string, error) {
buf, err := os.ReadFile(filename)
if err != nil {
return nil, nil, err
}
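// Collect the leading comment lines; they embed the input metrics
// ("Input:") and the expected output file name ("Output File:").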
header := make([]string, 0)
for _, line := range strings.Split(string(buf), "\n") {
line = strings.TrimSpace(line)
if strings.HasPrefix(line, "#") {
header = append(header, line)
}
}
var cfg Config
err = toml.Unmarshal(buf, &cfg)
return &cfg, header, err
}
func loadCSV(filename string) ([]byte, error) {
return os.ReadFile(filename)
}
func BenchmarkSerialize(b *testing.B) {
s := &Serializer{}
require.NoError(b, s.Init())
metrics := serializers.BenchmarkMetrics(b)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := s.Serialize(metrics[i%len(metrics)])
require.NoError(b, err)
}
}
func BenchmarkSerializeBatch(b *testing.B) {
s := &Serializer{}
require.NoError(b, s.Init())
m := serializers.BenchmarkMetrics(b)
metrics := m[:]
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := s.SerializeBatch(metrics)
require.NoError(b, err)
}
}

View file

@ -0,0 +1,8 @@
# Example for outputting CSV
#
# Output File:
# testcases/basic.csv
#
# Input:
# impression,flagname=F5,host=1cbbb3796fc2,key=12345,platform=Java,sdkver=4.9.1,value=false count_sum=5i 1653643420000000000
# expression,flagname=E42,host=klaus,key=67890,platform=Golang,sdkver=1.18.3,value=true count_sum=42i 1653646789000000000

View file

@ -0,0 +1,2 @@
1653643420,impression,F5,1cbbb3796fc2,12345,Java,4.9.1,false,5
1653646789,expression,E42,klaus,67890,Golang,1.18.3,true,42

View file

@ -0,0 +1,10 @@
# Example for outputting CSV in non-batch mode.
#
# Output File:
# testcases/header.csv
#
# Input:
# impression,flagname=F5,host=1cbbb3796fc2,key=12345,platform=Java,sdkver=4.9.1,value=false count_sum=5i 1653643420000000000
# expression,flagname=E42,host=klaus,key=67890,platform=Golang,sdkver=1.18.3,value=true count_sum=42i 1653646789000000000
csv_header = true

View file

@ -0,0 +1,3 @@
timestamp,measurement,flagname,host,key,platform,sdkver,value,count_sum
1653643420,impression,F5,1cbbb3796fc2,12345,Java,4.9.1,false,5
1653646789,expression,E42,klaus,67890,Golang,1.18.3,true,42

View file

@ -0,0 +1,10 @@
# Example for outputting CSV
#
# Output File:
# testcases/nanoseconds.csv
#
# Input:
# impression,flagname=F5,host=1cbbb3796fc2,key=12345,platform=Java,sdkver=4.9.1,value=false count_sum=5i 1653643420123456
# expression,flagname=E42,host=klaus,key=67890,platform=Golang,sdkver=1.18.3,value=true count_sum=42i 1653646789789012
csv_timestamp_format = "unix_ns"

View file

@ -0,0 +1,2 @@
1653643420123456,impression,F5,1cbbb3796fc2,12345,Java,4.9.1,false,5
1653646789789012,expression,E42,klaus,67890,Golang,1.18.3,true,42

View file

@ -0,0 +1,11 @@
# Example for outputting CSV with a specified column order
#
# Output File:
# testcases/ordered.csv
#
# Input:
# mymetric,machine=A1,host=1cbbb3796fc2 pressure=987.5,temperature=23.7,hours=15i 1653643420000000000
# mymetric,machine=X9,host=83d2e491ca01 pressure=1022.6,temperature=39.9,hours=231i 1653646789000000000
csv_timestamp_format = "unix_ns"
csv_columns = ["timestamp", "field.temperature", "field.pressure", "tag.machine"]

View file

@ -0,0 +1,2 @@
1653643420000000000,23.7,987.5,A1
1653646789000000000,39.9,1022.6,X9

View file

@ -0,0 +1,12 @@
# Example for outputting CSV with a specified column order
#
# Output File:
# testcases/ordered_not_exist.csv
#
# Input:
# mymetric,machine=A1,host=1cbbb3796fc2 pressure=987.5,temperature=23.7,hours=15i 1653643420000000000
# mymetric,machine=X9,host=83d2e491ca01 status="healthy",pressure=1022.6,temperature=39.9,hours=231i 1653646789000000000
csv_timestamp_format = "unix_ns"
csv_header = true
csv_columns = ["timestamp", "field.temperature", "field.pressure", "field.status", "tag.location", "tag.machine"]

View file

@ -0,0 +1,3 @@
timestamp,temperature,pressure,status,location,machine
1653643420000000000,23.7,987.5,,,A1
1653646789000000000,39.9,1022.6,healthy,,X9

View file

@ -0,0 +1,12 @@
# Example for outputting CSV with a specified column order
#
# Output File:
# testcases/ordered_with_header.csv
#
# Input:
# mymetric,machine=A1,host=1cbbb3796fc2 pressure=987.5,temperature=23.7,hours=15i 1653643420000000000
# mymetric,machine=X9,host=83d2e491ca01 pressure=1022.6,temperature=39.9,hours=231i 1653646789000000000
csv_timestamp_format = "unix_ns"
csv_header = true
csv_columns = ["timestamp", "field.temperature", "field.pressure", "tag.machine"]

View file

@ -0,0 +1,3 @@
timestamp,temperature,pressure,machine
1653643420000000000,23.7,987.5,A1
1653646789000000000,39.9,1022.6,X9

View file

@ -0,0 +1,13 @@
# Example for outputting CSV with a specified column order
#
# Output File:
# testcases/ordered_with_header_prefix.csv
#
# Input:
# mymetric,machine=A1,host=1cbbb3796fc2 pressure=987.5,temperature=23.7,hours=15i 1653643420000000000
# mymetric,machine=X9,host=83d2e491ca01 pressure=1022.6,temperature=39.9,hours=231i 1653646789000000000
csv_timestamp_format = "unix_ns"
csv_header = true
csv_column_prefix = true
csv_columns = ["timestamp", "field.temperature", "field.pressure", "tag.machine"]

View file

@ -0,0 +1,3 @@
timestamp,field_temperature,field_pressure,tag_machine
1653643420000000000,23.7,987.5,A1
1653646789000000000,39.9,1022.6,X9

View file

@ -0,0 +1,11 @@
# Example for outputting CSV in non-batch mode.
#
# Output File:
# testcases/prefix.csv
#
# Input:
# impression,flagname=F5,host=1cbbb3796fc2,key=12345,platform=Java,sdkver=4.9.1,value=false count_sum=5i 1653643420000000000
# expression,flagname=E42,host=klaus,key=67890,platform=Golang,sdkver=1.18.3,value=true count_sum=42i 1653646789000000000
csv_header = true
csv_column_prefix = true

View file

@ -0,0 +1,3 @@
timestamp,measurement,tag_flagname,tag_host,tag_key,tag_platform,tag_sdkver,tag_value,field_count_sum
1653643420,impression,F5,1cbbb3796fc2,12345,Java,4.9.1,false,5
1653646789,expression,E42,klaus,67890,Golang,1.18.3,true,42

View file

@ -0,0 +1,11 @@
# Example for outputting CSV in non-batch mode.
#
# Output File:
# testcases/rfc3339.csv
#
# Input:
# impression,flagname=F5,host=1cbbb3796fc2,key=12345,platform=Java,sdkver=4.9.1,value=false count_sum=5i 1653643420000000000
# expression,flagname=E42,host=klaus,key=67890,platform=Golang,sdkver=1.18.3,value=true count_sum=42i 1653646789000000000
csv_timestamp_format = "2006-01-02T15:04:05Z07:00"
csv_header = true

View file

@ -0,0 +1,3 @@
timestamp,measurement,flagname,host,key,platform,sdkver,value,count_sum
2022-05-27T09:23:40Z,impression,F5,1cbbb3796fc2,12345,Java,4.9.1,false,5
2022-05-27T10:19:49Z,expression,E42,klaus,67890,Golang,1.18.3,true,42

View file

@ -0,0 +1,11 @@
# Example for outputting CSV with a semicolon separator.
#
# Output File:
# testcases/semicolon.csv
#
# Input:
# impression,flagname=F5,host=1cbbb3796fc2,key=12345,platform=Java,sdkver=4.9.1,value=false count_sum=5i 1653643420000000000
# expression,flagname=E42,host=klaus,key=67890,platform=Golang,sdkver=1.18.3,value=true count_sum=42i 1653646789000000000
csv_separator = ";"
csv_header = true

View file

@ -0,0 +1,3 @@
timestamp;measurement;flagname;host;key;platform;sdkver;value;count_sum
1653643420;impression;F5;1cbbb3796fc2;12345;Java;4.9.1;false;5
1653646789;expression;E42;klaus;67890;Golang;1.18.3;true;42

View file

@ -0,0 +1,6 @@
package serializers
import "github.com/influxdata/telegraf"
// Deprecations lists the deprecated plugins
var Deprecations = make(map[string]telegraf.DeprecationInfo)

View file

@ -0,0 +1,90 @@
# Graphite
The Graphite data format is translated from Telegraf Metrics using either the
template pattern or tag support method. You can select between the two
methods using the [`graphite_tag_support`](#graphite_tag_support) option. When set, the tag support
method is used, otherwise the [Template Pattern][templates] is used.
[templates]: /docs/TEMPLATE_PATTERN.md
## Configuration
```toml
[[outputs.file]]
## Files to write to, "stdout" is a specially handled file.
files = ["stdout", "/tmp/metrics.out"]
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "graphite"
## Prefix added to each graphite bucket
prefix = "telegraf"
## Graphite template pattern
template = "host.tags.measurement.field"
## Graphite templates patterns
## 1. Template for cpu
## 2. Template for disk*
## 3. Default template
# templates = [
# "cpu tags.measurement.host.field",
# "disk* measurement.field",
# "host.measurement.tags.field"
#]
## Strict sanitization regex
## This is the default sanitization regex that is used on data passed to the
## graphite serializer. Users can add additional characters here if required.
## Be aware that the characters '/', '@' and '*' are always replaced with '-',
## '..' is replaced with '.', and '\' is removed even if added to the
## following regex.
# graphite_strict_sanitize_regex = '[^a-zA-Z0-9-:._=\p{L}]'
## Support Graphite tags, recommended to enable when using Graphite 1.1 or later.
# graphite_tag_support = false
## Applied sanitization mode when graphite tag support is enabled.
## * strict - uses the regex specified above
## * compatible - allows for a greater number of characters
# graphite_tag_sanitize_mode = "strict"
## Character for separating metric name and field for Graphite tags
# graphite_separator = "."
```
### graphite_tag_support
When the `graphite_tag_support` option is enabled, the template pattern is not
used. Instead, tags are encoded using
[Graphite tag support](http://graphite.readthedocs.io/en/latest/tags.html)
added in Graphite 1.1. The `metric_path` is a combination of the optional
`prefix` option, measurement name, and field name.
The tag `name` is reserved by Graphite; any conflicting tags will be encoded as `_name`.
**Example Conversion**:
```text
cpu,cpu=cpu-total,dc=us-east-1,host=tars usage_idle=98.09,usage_user=0.89 1455320660004257758
=>
cpu.usage_user;cpu=cpu-total;dc=us-east-1;host=tars 0.89 1455320660
cpu.usage_idle;cpu=cpu-total;dc=us-east-1;host=tars 98.09 1455320660
```
With the `graphite_separator` option set to "_":
```text
cpu,cpu=cpu-total,dc=us-east-1,host=tars usage_idle=98.09,usage_user=0.89 1455320660004257758
=>
cpu_usage_user;cpu=cpu-total;dc=us-east-1;host=tars 0.89 1455320660
cpu_usage_idle;cpu=cpu-total;dc=us-east-1;host=tars 98.09 1455320660
```
The `graphite_tag_sanitize_mode` option defines how tag names and values are sanitized. Possible values are `strict` and `compatible`, with `strict` being the default.
When in `strict` mode, Telegraf uses the same sanitization rules that are applied to metrics when tags are not used.
When in `compatible` mode, Telegraf allows more characters through, based on the Graphite specification:
>Tag names must have a length >= 1 and may contain any ascii characters except `;!^=`. Tag values must also have a length >= 1, they may contain any ascii characters except `;` and the first character must not be `~`. UTF-8 characters may work for names and values, but they are not well tested and it is not recommended to use non-ascii characters in metric names or tags. Metric names get indexed under the special tag name, if a metric name starts with one or multiple ~ they simply get removed from the derived tag value because the ~ character is not allowed to be in the first position of the tag value. If a metric name consists of no other characters than ~, then it is considered invalid and may get dropped.
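As an illustrative comparison of the two modes (the tag itself is hypothetical), a tag such as `dc=us/east` is sanitized differently: `strict` replaces the `/` with a hyphen, while `compatible` passes it through unchanged:
```text
strict:     dc=us-east
compatible: dc=us/east
```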

View file

@ -0,0 +1,362 @@
package graphite
import (
"bytes"
"fmt"
"math"
"regexp"
"sort"
"strconv"
"strings"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/filter"
"github.com/influxdata/telegraf/plugins/serializers"
)
const DefaultTemplate = "host.tags.measurement.field"
var (
compatibleAllowedCharsName = regexp.MustCompile(`[^ "-:\<>-\]_a-~\p{L}]`) //nolint:gocritic // valid range for use-case
compatibleAllowedCharsValue = regexp.MustCompile(`[^ -:<-~\p{L}]`) //nolint:gocritic // valid range for use-case
compatibleLeadingTildeDrop = regexp.MustCompile(`^[~]*(.*)`)
hyphenChars = strings.NewReplacer(
"/", "-",
"@", "-",
"*", "-",
)
dropChars = strings.NewReplacer(
`\`, "",
"..", ".",
)
fieldDeleter = strings.NewReplacer(".FIELDNAME", "", "FIELDNAME.", "")
)
type GraphiteTemplate struct {
Filter filter.Filter
Value string
}
type GraphiteSerializer struct {
Prefix string `toml:"prefix"`
Template string `toml:"template"`
StrictRegex string `toml:"graphite_strict_sanitize_regex"`
TagSupport bool `toml:"graphite_tag_support"`
TagSanitizeMode string `toml:"graphite_tag_sanitize_mode"`
Separator string `toml:"graphite_separator"`
Templates []string `toml:"templates"`
tmplts []*GraphiteTemplate
strictAllowedChars *regexp.Regexp
}
func (s *GraphiteSerializer) Init() error {
graphiteTemplates, defaultTemplate, err := InitGraphiteTemplates(s.Templates)
if err != nil {
return err
}
s.tmplts = graphiteTemplates
if defaultTemplate != "" {
s.Template = defaultTemplate
}
if s.TagSanitizeMode == "" {
s.TagSanitizeMode = "strict"
}
if s.Separator == "" {
s.Separator = "."
}
if s.StrictRegex == "" {
s.strictAllowedChars = regexp.MustCompile(`[^a-zA-Z0-9-:._=\p{L}]`)
} else {
var err error
s.strictAllowedChars, err = regexp.Compile(s.StrictRegex)
if err != nil {
return fmt.Errorf("invalid regex provided %q: %w", s.StrictRegex, err)
}
}
return nil
}
func (s *GraphiteSerializer) Serialize(metric telegraf.Metric) ([]byte, error) {
var out []byte
// Convert UnixNano to Unix timestamps
timestamp := metric.Time().UnixNano() / 1000000000
switch s.TagSupport {
case true:
for fieldName, value := range metric.Fields() {
fieldValue := formatValue(value)
if fieldValue == "" {
continue
}
bucket := s.SerializeBucketNameWithTags(metric.Name(), metric.Tags(), s.Prefix, s.Separator, fieldName, s.TagSanitizeMode)
metricString := fmt.Sprintf("%s %s %d\n",
// insert "field" section of template
bucket,
// bucket,
fieldValue,
timestamp)
point := []byte(metricString)
out = append(out, point...)
}
default:
template := s.Template
for _, graphiteTemplate := range s.tmplts {
if graphiteTemplate.Filter.Match(metric.Name()) {
template = graphiteTemplate.Value
break
}
}
bucket := SerializeBucketName(metric.Name(), metric.Tags(), template, s.Prefix)
if bucket == "" {
return out, nil
}
for fieldName, value := range metric.Fields() {
fieldValue := formatValue(value)
if fieldValue == "" {
continue
}
metricString := fmt.Sprintf("%s %s %d\n",
// insert "field" section of template
s.strictSanitize(InsertField(bucket, fieldName)),
fieldValue,
timestamp)
point := []byte(metricString)
out = append(out, point...)
}
}
return out, nil
}
func (s *GraphiteSerializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) {
var batch bytes.Buffer
for _, m := range metrics {
buf, err := s.Serialize(m)
if err != nil {
return nil, err
}
batch.Write(buf)
}
return batch.Bytes(), nil
}
func formatValue(value interface{}) string {
switch v := value.(type) {
case string:
return ""
case bool:
if v {
return "1"
}
return "0"
case uint64:
return strconv.FormatUint(v, 10)
case int64:
return strconv.FormatInt(v, 10)
case float64:
if math.IsNaN(v) {
return ""
}
if math.IsInf(v, 0) {
return ""
}
return strconv.FormatFloat(v, 'f', -1, 64)
}
return ""
}
// SerializeBucketName will take the given measurement name and tags and
// produce a graphite bucket. It will use the GraphiteSerializer.Template
// to generate this, or DefaultTemplate.
//
// NOTE: SerializeBucketName replaces the "field" portion of the template with
// FIELDNAME. It is up to the user to replace this. This is so that
// SerializeBucketName can be called just once per measurement, rather than
// once per field. See GraphiteSerializer.InsertField() function.
func SerializeBucketName(measurement string, tags map[string]string, template, prefix string) string {
if template == "" {
template = DefaultTemplate
}
tagsCopy := make(map[string]string)
for k, v := range tags {
tagsCopy[k] = v
}
var out []string
templateParts := strings.Split(template, ".")
for _, templatePart := range templateParts {
switch templatePart {
case "measurement":
out = append(out, measurement)
case "tags":
// we will replace this later
out = append(out, "TAGS")
case "field":
// user of SerializeBucketName needs to replace this
out = append(out, "FIELDNAME")
default:
// This is a tag being applied
if tagvalue, ok := tagsCopy[templatePart]; ok {
out = append(out, strings.ReplaceAll(tagvalue, ".", "_"))
delete(tagsCopy, templatePart)
}
}
}
// insert remaining tags into output name
for i, templatePart := range out {
if templatePart == "TAGS" {
out[i] = buildTags(tagsCopy)
break
}
}
if len(out) == 0 {
return ""
}
if prefix == "" {
return strings.Join(out, ".")
}
return prefix + "." + strings.Join(out, ".")
}
func InitGraphiteTemplates(templates []string) ([]*GraphiteTemplate, string, error) {
defaultTemplate := ""
graphiteTemplates := make([]*GraphiteTemplate, 0, len(templates))
for i, t := range templates {
parts := strings.Fields(t)
if len(parts) == 0 {
return nil, "", fmt.Errorf("missing template at position: %d", i)
}
if len(parts) == 1 {
if parts[0] == "" {
return nil, "", fmt.Errorf("missing template at position: %d", i)
}
// Override default template
defaultTemplate = t
continue
}
if len(parts) > 2 {
return nil, "", fmt.Errorf("invalid template format: %q", t)
}
tFilter, err := filter.Compile([]string{parts[0]})
if err != nil {
return nil, "", err
}
graphiteTemplates = append(graphiteTemplates, &GraphiteTemplate{
Filter: tFilter,
Value: parts[1],
})
}
return graphiteTemplates, defaultTemplate, nil
}
// SerializeBucketNameWithTags will take the given measurement name and tags and
// produce a graphite bucket. It will use the Graphite11Serializer.
// http://graphite.readthedocs.io/en/latest/tags.html
func (s *GraphiteSerializer) SerializeBucketNameWithTags(measurement string, tags map[string]string, prefix, separator, field, tagSanitizeMode string) string {
var out string
var tagsCopy []string
for k, v := range tags {
if k == "name" {
k = "_name"
}
if tagSanitizeMode == "compatible" {
tagsCopy = append(tagsCopy, compatibleSanitize(k, v))
} else {
tagsCopy = append(tagsCopy, s.strictSanitize(k+"="+v))
}
}
sort.Strings(tagsCopy)
if prefix != "" {
out = prefix + separator
}
out += measurement
if field != "value" {
out += separator + field
}
out = s.strictSanitize(out)
if len(tagsCopy) > 0 {
out += ";" + strings.Join(tagsCopy, ";")
}
return out
}
// InsertField takes the bucket string from SerializeBucketName and replaces the
// FIELDNAME portion. If fieldName == "value", it will simply delete the
// FIELDNAME portion.
func InsertField(bucket, fieldName string) string {
// if the field name is "value", then dont use it
if fieldName == "value" {
return fieldDeleter.Replace(bucket)
}
return strings.Replace(bucket, "FIELDNAME", fieldName, 1)
}
func buildTags(tags map[string]string) string {
keys := make([]string, 0, len(tags))
for k := range tags {
keys = append(keys, k)
}
sort.Strings(keys)
var tagStr string
for i, k := range keys {
tagValue := strings.ReplaceAll(tags[k], ".", "_")
if i == 0 {
tagStr += tagValue
} else {
tagStr += "." + tagValue
}
}
return tagStr
}
func (s *GraphiteSerializer) strictSanitize(value string) string {
// Apply special hyphenation rules to preserve backwards compatibility
value = hyphenChars.Replace(value)
// Apply rule to drop some chars to preserve backwards compatibility
value = dropChars.Replace(value)
// Replace any remaining illegal chars
return s.strictAllowedChars.ReplaceAllLiteralString(value, "_")
}
func compatibleSanitize(name, value string) string {
name = compatibleAllowedCharsName.ReplaceAllLiteralString(name, "_")
value = compatibleAllowedCharsValue.ReplaceAllLiteralString(value, "_")
value = compatibleLeadingTildeDrop.FindStringSubmatch(value)[1]
return name + "=" + value
}
func init() {
serializers.Add("graphite",
func() telegraf.Serializer {
return &GraphiteSerializer{}
},
)
}

File diff suppressed because it is too large

View file

@ -0,0 +1,51 @@
# Influx
The `influx` data format outputs metrics into [InfluxDB Line Protocol][line
protocol]. This is the recommended format unless another format is required
for interoperability.
## Configuration
```toml
[[outputs.file]]
## Files to write to, "stdout" is a specially handled file.
files = ["stdout", "/tmp/metrics.out"]
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
## Maximum line length in bytes. Useful only for debugging.
influx_max_line_bytes = 0
## When true, fields will be output in ascending lexical order. Enabling
## this option will result in decreased performance and is only recommended
## when you need predictable ordering while debugging.
influx_sort_fields = false
## When true, Telegraf will output unsigned integers as unsigned values,
## i.e.: `42u`. You will need a version of InfluxDB supporting unsigned
## integer values. Enabling this option will result in field type errors if
## existing data has been written.
influx_uint_support = false
## When true, Telegraf will omit the timestamp on data to allow InfluxDB
## to set the timestamp of the data during ingestion. This is generally NOT
## what you want as it can lead to data points captured at different times
## being overwritten because they appear identical during ingestion.
# influx_omit_timestamp = false
```
## Metrics
Conversion is direct, taking into account some limitations of the Line Protocol
format:
- Float fields that are `NaN` or `Inf` are skipped.
- Trailing backslash `\` characters are removed from tag keys and values (see the example below).
- Tags with a key or value that is the empty string are skipped.
- When not using `influx_uint_support`, unsigned integers are capped at the max int64.
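For instance, the trailing-backslash rule means a metric with the tag `path=C:\` is serialized with the backslash trimmed (an illustrative conversion):
```text
disk,path=C: value=42i 0
```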
[line protocol]: https://docs.influxdata.com/influxdb/latest/write_protocols/line_protocol_tutorial/

View file

@ -0,0 +1,59 @@
package influx
import "strings"
const (
escapes = "\t\n\f\r ,="
nameEscapes = "\t\n\f\r ,"
stringFieldEscapes = "\t\n\f\r\\\""
)
var (
escaper = strings.NewReplacer(
"\t", `\t`,
"\n", `\n`,
"\f", `\f`,
"\r", `\r`,
`,`, `\,`,
` `, `\ `,
`=`, `\=`,
)
nameEscaper = strings.NewReplacer(
"\t", `\t`,
"\n", `\n`,
"\f", `\f`,
"\r", `\r`,
`,`, `\,`,
` `, `\ `,
)
stringFieldEscaper = strings.NewReplacer(
`"`, `\"`,
`\`, `\\`,
)
)
// Escape a tagkey, tagvalue, or fieldkey
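// For example (sketch): escape("us west") returns `us\ west`, since spaces,
// commas and '=' must be escaped in keys and tag values.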
func escape(s string) string {
if strings.ContainsAny(s, escapes) {
return escaper.Replace(s)
}
return s
}
// Escape a measurement name
func nameEscape(s string) string {
if strings.ContainsAny(s, nameEscapes) {
return nameEscaper.Replace(s)
}
return s
}
// Escape a string field
func stringFieldEscape(s string) string {
if strings.ContainsAny(s, stringFieldEscapes) {
return stringFieldEscaper.Replace(s)
}
return s
}

View file

@ -0,0 +1,335 @@
package influx
import (
"bytes"
"errors"
"fmt"
"io"
"log"
"math"
"sort"
"strconv"
"strings"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/serializers"
)
const (
MaxInt64 = int64(^uint64(0) >> 1)
NeedMoreSpace = "need more space"
InvalidName = "invalid name"
NoFields = "no serializable fields"
)
// MetricError is an error causing an entire metric to be unserializable.
type MetricError struct {
series string
reason string
}
func (e MetricError) Error() string {
if e.series != "" {
return fmt.Sprintf("%q: %s", e.series, e.reason)
}
return e.reason
}
// FieldError is an error causing a field to be unserializable.
type FieldError struct {
reason string
}
func (e FieldError) Error() string {
return e.reason
}
// Serializer is a serializer for line protocol.
type Serializer struct {
MaxLineBytes int `toml:"influx_max_line_bytes"`
SortFields bool `toml:"influx_sort_fields"`
UintSupport bool `toml:"influx_uint_support"`
OmitTimestamp bool `toml:"influx_omit_timestamp"`
bytesWritten int
buf bytes.Buffer
header []byte
footer []byte
pair []byte
}
func (s *Serializer) Init() error {
s.header = make([]byte, 0, 50)
s.footer = make([]byte, 0, 21)
s.pair = make([]byte, 0, 50)
return nil
}
// Serialize writes the telegraf.Metric to a byte slice. May produce multiple
// lines of output if longer than maximum line length. Lines are terminated
// with a newline (LF) char.
func (s *Serializer) Serialize(m telegraf.Metric) ([]byte, error) {
s.buf.Reset()
err := s.writeMetric(&s.buf, m)
if err != nil {
return nil, err
}
out := make([]byte, 0, s.buf.Len())
return append(out, s.buf.Bytes()...), nil
}
// SerializeBatch writes the slice of metrics and returns a byte slice of the
// results. The returned byte slice may contain multiple lines of data.
func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) {
s.buf.Reset()
for _, m := range metrics {
err := s.Write(&s.buf, m)
if err != nil {
var mErr *MetricError
if errors.As(err, &mErr) {
continue
}
return nil, err
}
}
out := make([]byte, 0, s.buf.Len())
return append(out, s.buf.Bytes()...), nil
}
func (s *Serializer) Write(w io.Writer, m telegraf.Metric) error {
return s.writeMetric(w, m)
}
func (s *Serializer) writeString(w io.Writer, str string) error {
n, err := io.WriteString(w, str)
s.bytesWritten += n
return err
}
func (s *Serializer) writeBytes(w io.Writer, b []byte) error {
n, err := w.Write(b)
s.bytesWritten += n
return err
}
func (s *Serializer) buildHeader(m telegraf.Metric) error {
s.header = s.header[:0]
name := nameEscape(m.Name())
if name == "" {
return s.newMetricError(InvalidName)
}
s.header = append(s.header, name...)
for _, tag := range m.TagList() {
key := escape(tag.Key)
value := escape(tag.Value)
// Tag keys and values that end with a backslash cannot be encoded by
// line protocol.
if strings.HasSuffix(key, `\`) {
key = strings.TrimRight(key, `\`)
}
if strings.HasSuffix(value, `\`) {
value = strings.TrimRight(value, `\`)
}
// Tag keys and values must not be the empty string.
if key == "" || value == "" {
continue
}
s.header = append(s.header, ',')
s.header = append(s.header, key...)
s.header = append(s.header, '=')
s.header = append(s.header, value...)
}
s.header = append(s.header, ' ')
return nil
}
func (s *Serializer) buildFooter(m telegraf.Metric) {
s.footer = s.footer[:0]
if !s.OmitTimestamp {
s.footer = append(s.footer, ' ')
s.footer = strconv.AppendInt(s.footer, m.Time().UnixNano(), 10)
}
s.footer = append(s.footer, '\n')
}
func (s *Serializer) buildFieldPair(key string, value interface{}) error {
s.pair = s.pair[:0]
key = escape(key)
// Some keys are not encodeable as line protocol, such as those with a
// trailing '\' or empty strings.
if key == "" {
return &FieldError{"invalid field key"}
}
s.pair = append(s.pair, key...)
s.pair = append(s.pair, '=')
pair, err := s.appendFieldValue(s.pair, value)
if err != nil {
return err
}
s.pair = pair
return nil
}
func (s *Serializer) writeMetric(w io.Writer, m telegraf.Metric) error {
var err error
err = s.buildHeader(m)
if err != nil {
return err
}
s.buildFooter(m)
if s.SortFields {
sort.Slice(m.FieldList(), func(i, j int) bool {
return m.FieldList()[i].Key < m.FieldList()[j].Key
})
}
pairsLen := 0
firstField := true
for _, field := range m.FieldList() {
err = s.buildFieldPair(field.Key, field.Value)
if err != nil {
log.Printf(
"D! [serializers.influx] could not serialize field %q: %v; discarding field",
field.Key, err)
continue
}
bytesNeeded := len(s.header) + pairsLen + len(s.pair) + len(s.footer)
// Additional length needed for field separator `,`
if !firstField {
bytesNeeded++
}
if s.MaxLineBytes > 0 && bytesNeeded > s.MaxLineBytes {
// Need at least one field per line; this metric cannot fit
// into the max line bytes.
if firstField {
return s.newMetricError(NeedMoreSpace)
}
err = s.writeBytes(w, s.footer)
if err != nil {
return err
}
pairsLen = 0
firstField = true
bytesNeeded = len(s.header) + len(s.pair) + len(s.footer)
if bytesNeeded > s.MaxLineBytes {
return s.newMetricError(NeedMoreSpace)
}
}
if firstField {
err = s.writeBytes(w, s.header)
if err != nil {
return err
}
} else {
err = s.writeString(w, ",")
if err != nil {
return err
}
}
err = s.writeBytes(w, s.pair)
if err != nil {
return err
}
pairsLen += len(s.pair)
firstField = false
}
if firstField {
return s.newMetricError(NoFields)
}
return s.writeBytes(w, s.footer)
}
func (s *Serializer) newMetricError(reason string) *MetricError {
if len(s.header) != 0 {
series := bytes.TrimRight(s.header, " ")
return &MetricError{series: string(series), reason: reason}
}
return &MetricError{reason: reason}
}
func (s *Serializer) appendFieldValue(buf []byte, value interface{}) ([]byte, error) {
switch v := value.(type) {
case uint64:
if s.UintSupport {
return appendUintField(buf, v), nil
}
if v <= uint64(MaxInt64) {
return appendIntField(buf, int64(v)), nil
}
return appendIntField(buf, MaxInt64), nil
case int64:
return appendIntField(buf, v), nil
case float64:
if math.IsNaN(v) {
return nil, &FieldError{"is NaN"}
}
if math.IsInf(v, 0) {
return nil, &FieldError{"is Inf"}
}
return appendFloatField(buf, v), nil
case string:
return appendStringField(buf, v), nil
case bool:
return appendBoolField(buf, v), nil
default:
return buf, &FieldError{fmt.Sprintf("invalid value type: %T", v)}
}
}
func appendUintField(buf []byte, value uint64) []byte {
return append(strconv.AppendUint(buf, value, 10), 'u')
}
func appendIntField(buf []byte, value int64) []byte {
return append(strconv.AppendInt(buf, value, 10), 'i')
}
func appendFloatField(buf []byte, value float64) []byte {
return strconv.AppendFloat(buf, value, 'f', -1, 64)
}
func appendBoolField(buf []byte, value bool) []byte {
return strconv.AppendBool(buf, value)
}
func appendStringField(buf []byte, value string) []byte {
buf = append(buf, '"')
buf = append(buf, stringFieldEscape(value)...)
buf = append(buf, '"')
return buf
}
func init() {
serializers.Add("influx",
func() telegraf.Serializer {
return &Serializer{}
},
)
}

View file

@ -0,0 +1,575 @@
package influx
import (
"math"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/plugins/serializers"
)
var tests = []struct {
name string
maxBytes int
uintSupport bool
input telegraf.Metric
output []byte
errReason string
}{
{
name: "minimal",
input: metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": 42.0,
},
time.Unix(0, 0),
),
output: []byte("cpu value=42 0\n"),
},
{
name: "multiple tags",
input: metric.New(
"cpu",
map[string]string{
"host": "localhost",
"cpu": "CPU0",
},
map[string]interface{}{
"value": 42.0,
},
time.Unix(0, 0),
),
output: []byte("cpu,cpu=CPU0,host=localhost value=42 0\n"),
},
{
name: "multiple fields",
input: metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"x": 42.0,
"y": 42.0,
},
time.Unix(0, 0),
),
output: []byte("cpu x=42,y=42 0\n"),
},
{
name: "float NaN",
input: metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"x": math.NaN(),
"y": 42,
},
time.Unix(0, 0),
),
output: []byte("cpu y=42i 0\n"),
},
{
name: "float NaN only",
input: metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": math.NaN(),
},
time.Unix(0, 0),
),
errReason: NoFields,
},
{
name: "float Inf",
input: metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": math.Inf(1),
"y": 42,
},
time.Unix(0, 0),
),
output: []byte("cpu y=42i 0\n"),
},
{
name: "integer field",
input: metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": 42,
},
time.Unix(0, 0),
),
output: []byte("cpu value=42i 0\n"),
},
{
name: "integer field 64-bit",
input: metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": int64(123456789012345),
},
time.Unix(0, 0),
),
output: []byte("cpu value=123456789012345i 0\n"),
},
{
name: "uint field",
input: metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": uint64(42),
},
time.Unix(0, 0),
),
output: []byte("cpu value=42u 0\n"),
uintSupport: true,
},
{
name: "uint field max value",
input: metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": uint64(18446744073709551615),
},
time.Unix(0, 0),
),
output: []byte("cpu value=18446744073709551615u 0\n"),
uintSupport: true,
},
{
name: "uint field no uint support",
input: metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": uint64(42),
},
time.Unix(0, 0),
),
output: []byte("cpu value=42i 0\n"),
},
{
name: "uint field no uint support overflow",
input: metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": uint64(18446744073709551615),
},
time.Unix(0, 0),
),
output: []byte("cpu value=9223372036854775807i 0\n"),
},
{
name: "bool field",
input: metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": true,
},
time.Unix(0, 0),
),
output: []byte("cpu value=true 0\n"),
},
{
name: "string field",
input: metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": "howdy",
},
time.Unix(0, 0),
),
output: []byte("cpu value=\"howdy\" 0\n"),
},
{
name: "timestamp",
input: metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": 42.0,
},
time.Unix(1519194109, 42),
),
output: []byte("cpu value=42 1519194109000000042\n"),
},
{
name: "split fields exact",
maxBytes: 33,
input: metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"abc": 123,
"def": 456,
},
time.Unix(1519194109, 42),
),
output: []byte("cpu abc=123i 1519194109000000042\ncpu def=456i 1519194109000000042\n"),
},
{
name: "split fields extra",
maxBytes: 34,
input: metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"abc": 123,
"def": 456,
},
time.Unix(1519194109, 42),
),
output: []byte("cpu abc=123i 1519194109000000042\ncpu def=456i 1519194109000000042\n"),
},
{
name: "split_fields_overflow",
maxBytes: 43,
input: metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"abc": 123,
"def": 456,
"ghi": 789,
"jkl": 123,
},
time.Unix(1519194109, 42),
),
output: []byte("cpu abc=123i,def=456i 1519194109000000042\ncpu ghi=789i,jkl=123i 1519194109000000042\n"),
},
{
name: "name newline",
input: metric.New(
"c\npu",
map[string]string{},
map[string]interface{}{
"value": 42,
},
time.Unix(0, 0),
),
output: []byte("c\\npu value=42i 0\n"),
},
{
name: "tag newline",
input: metric.New(
"cpu",
map[string]string{
"host": "x\ny",
},
map[string]interface{}{
"value": 42,
},
time.Unix(0, 0),
),
output: []byte("cpu,host=x\\ny value=42i 0\n"),
},
{
name: "empty tag value is removed",
input: metric.New(
"cpu",
map[string]string{
"host": "",
},
map[string]interface{}{
"value": 42,
},
time.Unix(0, 0),
),
output: []byte("cpu value=42i 0\n"),
},
{
name: "empty tag key is removed",
input: metric.New(
"cpu",
map[string]string{
"": "example.org",
},
map[string]interface{}{
"value": 42,
},
time.Unix(0, 0),
),
output: []byte("cpu value=42i 0\n"),
},
{
name: "tag value ends with backslash is trimmed",
input: metric.New(
"disk",
map[string]string{
"path": `C:\`,
},
map[string]interface{}{
"value": 42,
},
time.Unix(0, 0),
),
output: []byte("disk,path=C: value=42i 0\n"),
},
{
name: "tag key ends with backslash is trimmed",
input: metric.New(
"disk",
map[string]string{
`path\`: "/",
},
map[string]interface{}{
"value": 42,
},
time.Unix(0, 0),
),
output: []byte("disk,path=/ value=42i 0\n"),
},
{
name: "tag key backslash is trimmed and removed",
input: metric.New(
"disk",
map[string]string{
`\`: "example.org",
},
map[string]interface{}{
"value": 42,
},
time.Unix(0, 0),
),
output: []byte("disk value=42i 0\n"),
},
{
name: "tag value backslash is trimmed and removed",
input: metric.New(
"disk",
map[string]string{
"host": `\`,
},
map[string]interface{}{
"value": 42,
},
time.Unix(0, 0),
),
output: []byte("disk value=42i 0\n"),
},
{
name: "string newline",
input: metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": "x\ny",
},
time.Unix(0, 0),
),
output: []byte("cpu value=\"x\ny\" 0\n"),
},
{
name: "need more space",
maxBytes: 32,
input: metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"abc": 123,
"def": 456,
},
time.Unix(1519194109, 42),
),
output: nil,
errReason: NeedMoreSpace,
},
{
name: "no fields",
input: metric.New(
"cpu",
map[string]string{},
map[string]interface{}{},
time.Unix(0, 0),
),
errReason: NoFields,
},
{
name: "procstat",
input: metric.New(
"procstat",
map[string]string{
"exe": "bash",
"process_name": "bash",
},
map[string]interface{}{
"cpu_time": 0,
"cpu_time_guest": float64(0),
"cpu_time_guest_nice": float64(0),
"cpu_time_idle": float64(0),
"cpu_time_iowait": float64(0),
"cpu_time_irq": float64(0),
"cpu_time_nice": float64(0),
"cpu_time_soft_irq": float64(0),
"cpu_time_steal": float64(0),
"cpu_time_system": float64(0),
"cpu_time_user": float64(0.02),
"cpu_usage": float64(0),
"involuntary_context_switches": 2,
"memory_data": 1576960,
"memory_locked": 0,
"memory_rss": 5103616,
"memory_stack": 139264,
"memory_swap": 0,
"memory_vms": 21659648,
"nice_priority": 20,
"num_fds": 4,
"num_threads": 1,
"pid": 29417,
"read_bytes": 0,
"read_count": 259,
"realtime_priority": 0,
"rlimit_cpu_time_hard": 2147483647,
"rlimit_cpu_time_soft": 2147483647,
"rlimit_file_locks_hard": 2147483647,
"rlimit_file_locks_soft": 2147483647,
"rlimit_memory_data_hard": 2147483647,
"rlimit_memory_data_soft": 2147483647,
"rlimit_memory_locked_hard": 65536,
"rlimit_memory_locked_soft": 65536,
"rlimit_memory_rss_hard": 2147483647,
"rlimit_memory_rss_soft": 2147483647,
"rlimit_memory_stack_hard": 2147483647,
"rlimit_memory_stack_soft": 8388608,
"rlimit_memory_vms_hard": 2147483647,
"rlimit_memory_vms_soft": 2147483647,
"rlimit_nice_priority_hard": 0,
"rlimit_nice_priority_soft": 0,
"rlimit_num_fds_hard": 4096,
"rlimit_num_fds_soft": 1024,
"rlimit_realtime_priority_hard": 0,
"rlimit_realtime_priority_soft": 0,
"rlimit_signals_pending_hard": 78994,
"rlimit_signals_pending_soft": 78994,
"signals_pending": 0,
"voluntary_context_switches": 42,
"write_bytes": 106496,
"write_count": 35,
},
time.Unix(0, 1517620624000000000),
),
output: []byte(
"procstat,exe=bash,process_name=bash cpu_time=0i,cpu_time_guest=0,cpu_time_guest_nice=0,cpu_time_idle=0,cpu_time_iowait=0,cpu_time_irq=0," +
"cpu_time_nice=0,cpu_time_soft_irq=0,cpu_time_steal=0,cpu_time_system=0,cpu_time_user=0.02,cpu_usage=0,involuntary_context_switches=2i," +
"memory_data=1576960i,memory_locked=0i,memory_rss=5103616i,memory_stack=139264i,memory_swap=0i,memory_vms=21659648i,nice_priority=20i," +
"num_fds=4i,num_threads=1i,pid=29417i,read_bytes=0i,read_count=259i,realtime_priority=0i,rlimit_cpu_time_hard=2147483647i," +
"rlimit_cpu_time_soft=2147483647i,rlimit_file_locks_hard=2147483647i,rlimit_file_locks_soft=2147483647i,rlimit_memory_data_hard=2147483647i," +
"rlimit_memory_data_soft=2147483647i,rlimit_memory_locked_hard=65536i,rlimit_memory_locked_soft=65536i,rlimit_memory_rss_hard=2147483647i," +
"rlimit_memory_rss_soft=2147483647i,rlimit_memory_stack_hard=2147483647i,rlimit_memory_stack_soft=8388608i," +
"rlimit_memory_vms_hard=2147483647i,rlimit_memory_vms_soft=2147483647i,rlimit_nice_priority_hard=0i,rlimit_nice_priority_soft=0i," +
"rlimit_num_fds_hard=4096i,rlimit_num_fds_soft=1024i,rlimit_realtime_priority_hard=0i,rlimit_realtime_priority_soft=0i," +
"rlimit_signals_pending_hard=78994i,rlimit_signals_pending_soft=78994i,signals_pending=0i,voluntary_context_switches=42i," +
"write_bytes=106496i,write_count=35i 1517620624000000000\n",
),
},
}
func TestSerializer(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
serializer := &Serializer{
MaxLineBytes: tt.maxBytes,
SortFields: true,
UintSupport: tt.uintSupport,
}
output, err := serializer.Serialize(tt.input)
if tt.errReason != "" {
require.Error(t, err)
require.Contains(t, err.Error(), tt.errReason)
}
require.Equal(t, string(tt.output), string(output))
})
}
}
func TestOmitTimestamp(t *testing.T) {
m := metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": 42.0,
},
time.Unix(1519194109, 42),
)
serializer := &Serializer{
OmitTimestamp: true,
}
output, err := serializer.Serialize(m)
require.NoError(t, err)
require.Equal(t, []byte("cpu value=42\n"), output)
}
func BenchmarkSerializer(b *testing.B) {
for _, tt := range tests {
b.Run(tt.name, func(b *testing.B) {
serializer := &Serializer{
MaxLineBytes: tt.maxBytes,
UintSupport: tt.uintSupport,
}
for n := 0; n < b.N; n++ {
output, err := serializer.Serialize(tt.input)
_ = err
_ = output
}
})
}
}
func TestSerialize_SerializeBatch(t *testing.T) {
m := metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": 42.0,
},
time.Unix(0, 0),
)
metrics := []telegraf.Metric{m, m}
serializer := &Serializer{
SortFields: true,
}
output, err := serializer.SerializeBatch(metrics)
require.NoError(t, err)
require.Equal(t, []byte("cpu value=42 0\ncpu value=42 0\n"), output)
}
func BenchmarkSerialize(b *testing.B) {
s := &Serializer{}
require.NoError(b, s.Init())
metrics := serializers.BenchmarkMetrics(b)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := s.Serialize(metrics[i%len(metrics)])
require.NoError(b, err)
}
}
func BenchmarkSerializeBatch(b *testing.B) {
s := &Serializer{}
require.NoError(b, s.Init())
m := serializers.BenchmarkMetrics(b)
metrics := m[:]
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := s.SerializeBatch(metrics)
require.NoError(b, err)
}
}

View file

@ -0,0 +1,70 @@
package influx
import (
"bytes"
"errors"
"io"
"log"
"github.com/influxdata/telegraf"
)
// reader is an io.Reader for line protocol.
type reader struct {
metrics []telegraf.Metric
serializer *Serializer
offset int
buf *bytes.Buffer
}
// NewReader creates a new reader over the given metrics.
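//
// A minimal usage sketch (error handling elided):
//
//	s := &Serializer{}
//	_ = s.Init()
//	r := NewReader(metrics, s)
//	_, _ = io.Copy(os.Stdout, r)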
func NewReader(metrics []telegraf.Metric, serializer *Serializer) io.Reader {
return &reader{
metrics: metrics,
serializer: serializer,
offset: 0,
buf: bytes.NewBuffer(make([]byte, 0, serializer.MaxLineBytes)),
}
}
// SetMetrics changes the metrics to be read.
func (r *reader) SetMetrics(metrics []telegraf.Metric) {
r.metrics = metrics
r.offset = 0
r.buf.Reset()
}
// Read reads up to len(p) bytes of the current metric into p. Each call
// serializes at most one metric, so the number of bytes read may be less
// than len(p). Subsequent calls to Read will read the next metric until all
// are emitted. If a metric cannot be serialized, an error will be returned;
// you may resume with the next metric by calling Read again. When all
// metrics are emitted, the err is io.EOF.
func (r *reader) Read(p []byte) (int, error) {
if r.buf.Len() > 0 {
return r.buf.Read(p)
}
if r.offset >= len(r.metrics) {
return 0, io.EOF
}
for _, metric := range r.metrics[r.offset:] {
err := r.serializer.Write(r.buf, metric)
r.offset++
if err != nil {
r.buf.Reset()
var mErr *MetricError
if errors.As(err, &mErr) {
continue
}
// Since we are serializing multiple metrics, don't fail the
// entire batch just because of one unserializable metric.
log.Printf("E! [serializers.influx] could not serialize metric: %v; discarding metric", err)
continue
}
break
}
return r.buf.Read(p)
}

View file

@ -0,0 +1,262 @@
package influx
import (
"bytes"
"io"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
)
func TestReader(t *testing.T) {
tests := []struct {
name string
maxLineBytes int
bufferSize int
input []telegraf.Metric
expected []byte
}{
{
name: "minimal",
maxLineBytes: 4096,
bufferSize: 20,
input: []telegraf.Metric{
metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": 42.0,
},
time.Unix(0, 0),
),
},
expected: []byte("cpu value=42 0\n"),
},
{
name: "multiple lines",
maxLineBytes: 4096,
bufferSize: 20,
input: []telegraf.Metric{
metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": 42.0,
},
time.Unix(0, 0),
),
metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": 42.0,
},
time.Unix(0, 0),
),
},
expected: []byte("cpu value=42 0\ncpu value=42 0\n"),
},
{
name: "exact fit",
maxLineBytes: 4096,
bufferSize: 15,
input: []telegraf.Metric{
metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": 42.0,
},
time.Unix(0, 0),
),
},
expected: []byte("cpu value=42 0\n"),
},
{
name: "continue on failed metrics",
maxLineBytes: 4096,
bufferSize: 15,
input: []telegraf.Metric{
metric.New(
"",
map[string]string{},
map[string]interface{}{
"value": 42.0,
},
time.Unix(0, 0),
),
metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": 42.0,
},
time.Unix(0, 0),
),
},
expected: []byte("cpu value=42 0\n"),
},
{
name: "last metric failed regression",
maxLineBytes: 4096,
bufferSize: 15,
input: []telegraf.Metric{
metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": 42.0,
},
time.Unix(0, 0),
),
metric.New(
"",
map[string]string{},
map[string]interface{}{
"value": 42.0,
},
time.Unix(0, 0),
),
},
expected: []byte("cpu value=42 0\n"),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
serializer := &Serializer{
MaxLineBytes: tt.maxLineBytes,
SortFields: true,
}
reader := NewReader(tt.input, serializer)
data := new(bytes.Buffer)
readbuf := make([]byte, tt.bufferSize)
total := 0
for {
n, err := reader.Read(readbuf)
total += n
if err == io.EOF {
break
}
data.Write(readbuf[:n])
require.NoError(t, err)
}
require.Equal(t, tt.expected, data.Bytes())
require.Len(t, tt.expected, total)
})
}
}
func TestZeroLengthBufferNoError(t *testing.T) {
m := metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": 42.0,
},
time.Unix(0, 0),
)
serializer := &Serializer{
SortFields: true,
}
reader := NewReader([]telegraf.Metric{m}, serializer)
readbuf := make([]byte, 0)
n, err := reader.Read(readbuf)
require.NoError(t, err)
require.Equal(t, 0, n)
}
func BenchmarkReader(b *testing.B) {
m := metric.New(
"procstat",
map[string]string{
"exe": "bash",
"process_name": "bash",
},
map[string]interface{}{
"cpu_time": 0,
"cpu_time_guest": float64(0),
"cpu_time_guest_nice": float64(0),
"cpu_time_idle": float64(0),
"cpu_time_iowait": float64(0),
"cpu_time_irq": float64(0),
"cpu_time_nice": float64(0),
"cpu_time_soft_irq": float64(0),
"cpu_time_steal": float64(0),
"cpu_time_system": float64(0),
"cpu_time_user": float64(0.02),
"cpu_usage": float64(0),
"involuntary_context_switches": 2,
"memory_data": 1576960,
"memory_locked": 0,
"memory_rss": 5103616,
"memory_stack": 139264,
"memory_swap": 0,
"memory_vms": 21659648,
"nice_priority": 20,
"num_fds": 4,
"num_threads": 1,
"pid": 29417,
"read_bytes": 0,
"read_count": 259,
"realtime_priority": 0,
"rlimit_cpu_time_hard": 2147483647,
"rlimit_cpu_time_soft": 2147483647,
"rlimit_file_locks_hard": 2147483647,
"rlimit_file_locks_soft": 2147483647,
"rlimit_memory_data_hard": 2147483647,
"rlimit_memory_data_soft": 2147483647,
"rlimit_memory_locked_hard": 65536,
"rlimit_memory_locked_soft": 65536,
"rlimit_memory_rss_hard": 2147483647,
"rlimit_memory_rss_soft": 2147483647,
"rlimit_memory_stack_hard": 2147483647,
"rlimit_memory_stack_soft": 8388608,
"rlimit_memory_vms_hard": 2147483647,
"rlimit_memory_vms_soft": 2147483647,
"rlimit_nice_priority_hard": 0,
"rlimit_nice_priority_soft": 0,
"rlimit_num_fds_hard": 4096,
"rlimit_num_fds_soft": 1024,
"rlimit_realtime_priority_hard": 0,
"rlimit_realtime_priority_soft": 0,
"rlimit_signals_pending_hard": 78994,
"rlimit_signals_pending_soft": 78994,
"signals_pending": 0,
"voluntary_context_switches": 42,
"write_bytes": 106496,
"write_count": 35,
},
time.Unix(0, 1517620624000000000),
)
metrics := make([]telegraf.Metric, 0, 1000)
for i := 0; i < 1000; i++ {
metrics = append(metrics, m)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
readbuf := make([]byte, 4096)
serializer := &Serializer{}
reader := NewReader(metrics, serializer)
for {
_, err := reader.Read(readbuf)
if err == io.EOF {
break
}
if err != nil {
panic(err.Error())
}
}
}
}

View file

@ -0,0 +1,309 @@
# JSON
The `json` output data format converts metrics into JSON documents.
## Configuration
```toml
[[outputs.file]]
## Files to write to, "stdout" is a specially handled file.
files = ["stdout", "/tmp/metrics.out"]
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "json"
## The resolution to use for the metric timestamp. Must be a duration string
## such as "1ns", "1us", "1ms", "10ms", "1s". Durations are truncated to
## the power of 10 less than the specified units.
json_timestamp_units = "1s"
## The default timestamp format is Unix epoch time, subject to the
## resolution configured in json_timestamp_units.
## Other timestamp layouts can be configured using the Go language time
## layout specification from https://golang.org/pkg/time/#Time.Format
## e.g.: json_timestamp_format = "2006-01-02T15:04:05Z07:00"
# json_timestamp_format = ""
## A [JSONata](https://jsonata.org/) transformation of the JSON in
## [standard form](#examples). Please note that only version 1.5.4 of
## JSONata is supported due to the underlying library used.
## This allows generating an arbitrary output form based on the metric(s).
## Please use multiline strings (starting and ending with three
## single-quotes) if needed.
# json_transformation = ""
## Filter for fields that contain nested JSON data.
## The serializer will try to decode matching STRING fields containing
## valid JSON. This is done BEFORE any JSON transformation. The filters
## can contain wildcards.
# json_nested_fields_include = []
# json_nested_fields_exclude = []
```
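As a hedged example of the nested-fields option (the `status` field name and values are hypothetical), a matching string field containing valid JSON is decoded into the output document instead of being emitted as an escaped string. With `json_nested_fields_include = ["status"]` and the default one-second timestamp resolution:
```text
mymetric status="{\"code\": 200}" 1458229140000000000
=>
{"fields":{"status":{"code":200}},"name":"mymetric","tags":{},"timestamp":1458229140}
```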
## Examples
Standard form:
```json
{
"fields": {
"field_1": 30,
"field_2": 4,
"field_N": 59,
"n_images": 660
},
"name": "docker",
"tags": {
"host": "raynor"
},
"timestamp": 1458229140
}
```
When an output plugin needs to emit multiple metrics at one time, it may use
the batch format. The use of the batch format is determined by the plugin;
refer to the documentation for the specific plugin.
```json
{
"metrics": [
{
"fields": {
"field_1": 30,
"field_2": 4,
"field_N": 59,
"n_images": 660
},
"name": "docker",
"tags": {
"host": "raynor"
},
"timestamp": 1458229140
},
{
"fields": {
"field_1": 30,
"field_2": 4,
"field_N": 59,
"n_images": 660
},
"name": "docker",
"tags": {
"host": "raynor"
},
"timestamp": 1458229140
}
]
}
```
## Transformations
Transformations using the [JSONata standard](https://jsonata.org/) can be specified with
the `json_transformation` parameter. The input to the transformation is the serialized
metric in the standard form above.
**Note**: There is a difference between batch and non-batch serialization
modes! The former adds a `metrics` field containing the metric array, while
the latter serializes the metric directly.
**Note**: JSONata support is limited to version 1.5.4 due to the underlying
library used by Telegraf. When using the online playground below, ensure
that you have selected 1.5.4 when trying examples or building
transformations.
In the following sections, some rudimentary examples for transformations are shown.
For more elaborated JSONata expressions please consult the
[documentation](https://docs.jsonata.org) or the
[online playground](https://try.jsonata.org).
### Non-batch mode
In the following examples, we will use the following input to the transformation:
```json
{
"fields": {
"field_1": 30,
"field_2": 4,
"field_N": 59,
"n_images": 660
},
"name": "docker",
"tags": {
"host": "raynor"
},
"timestamp": 1458229140
}
```
If you want to flatten the above metric, you can use
```json
$merge([{"name": name, "timestamp": timestamp}, tags, fields])
```
to get
```json
{
"name": "docker",
"timestamp": 1458229140,
"host": "raynor",
"field_1": 30,
"field_2": 4,
"field_N": 59,
"n_images": 660
}
```
It is also possible to do arithmetic or renaming
```json
{
"capacity": $sum($sift($.fields,function($value,$key){$key~>/^field_/}).*),
"images": fields.n_images,
"host": tags.host,
"time": $fromMillis(timestamp*1000)
}
```
will result in
```json
{
"capacity": 93,
"images": 660,
"host": "raynor",
"time": "2016-03-17T15:39:00.000Z"
}
```
### Batch mode
When an output plugin emits multiple metrics in a batch fashion, it might be
useful to restructure and/or combine the metric elements. We will use the
following input example in this section:
```json
{
"metrics": [
{
"fields": {
"field_1": 30,
"field_2": 4,
"field_N": 59,
"n_images": 660
},
"name": "docker",
"tags": {
"host": "raynor"
},
"timestamp": 1458229140
},
{
"fields": {
"field_1": 12,
"field_2": 43,
"field_3": 0,
"field_4": 5,
"field_5": 7,
"field_N": 27,
"n_images": 72
},
"name": "docker",
"tags": {
"host": "amaranth"
},
"timestamp": 1458229140
},
{
"fields": {
"field_1": 5,
"field_N": 34,
"n_images": 0
},
"name": "storage",
"tags": {
"host": "amaranth"
},
"timestamp": 1458229140
}
]
}
```
We can do the same computation as above, iterating over the metrics
```json
metrics.{
"capacity": $sum($sift($.fields,function($value,$key){$key~>/^field_/}).*),
"images": fields.n_images,
"service": (name & "(" & tags.host & ")"),
"time": $fromMillis(timestamp*1000)
}
```
resulting in
```json
[
{
"capacity": 93,
"images": 660,
"service": "docker(raynor)",
"time": "2016-03-17T15:39:00.000Z"
},
{
"capacity": 94,
"images": 72,
"service": "docker(amaranth)",
"time": "2016-03-17T15:39:00.000Z"
},
{
"capacity": 39,
"images": 0,
"service": "storage(amaranth)",
"time": "2016-03-17T15:39:00.000Z"
}
]
```
However, the more interesting use-case is to restructure and **combine** the metrics, e.g. by grouping by `host`
```json
{
"time": $min(metrics.timestamp) * 1000 ~> $fromMillis(),
"images": metrics{
tags.host: {
name: fields.n_images
}
},
"capacity alerts": metrics[fields.n_images < 10].[(tags.host & " " & name)]
}
```
resulting in
```json
{
"time": "2016-03-17T15:39:00.000Z",
"images": {
"raynor": {
"docker": 660
},
"amaranth": {
"docker": 72,
"storage": 0
}
},
"capacity alerts": [
"amaranth storage"
]
}
```
Please consult the JSONata documentation for more examples and details.
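For programmatic use, the following is a minimal sketch of driving this serializer from Go with the flattening transformation shown earlier (it assumes the exported `Serializer` type of this package; error handling is kept terse):
```go
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/metric"
	"github.com/influxdata/telegraf/plugins/serializers/json"
)

func main() {
	// Flatten name, timestamp, tags and fields into a single object.
	s := &json.Serializer{
		Transformation: `$merge([{"name": name, "timestamp": timestamp}, tags, fields])`,
	}
	if err := s.Init(); err != nil {
		panic(err)
	}
	m := metric.New(
		"docker",
		map[string]string{"host": "raynor"},
		map[string]interface{}{"n_images": 660},
		time.Unix(1458229140, 0),
	)
	out, err := s.Serialize(m)
	if err != nil {
		panic(err)
	}
	// {"host":"raynor","n_images":660,"name":"docker","timestamp":1458229140}
	fmt.Print(string(out))
}
```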

View file

@ -0,0 +1,169 @@
package json
import (
"encoding/json"
"errors"
"fmt"
"math"
"time"
"github.com/blues/jsonata-go"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/filter"
"github.com/influxdata/telegraf/plugins/serializers"
)
type Serializer struct {
TimestampUnits config.Duration `toml:"json_timestamp_units"`
TimestampFormat string `toml:"json_timestamp_format"`
Transformation string `toml:"json_transformation"`
NestedFieldsInclude []string `toml:"json_nested_fields_include"`
NestedFieldsExclude []string `toml:"json_nested_fields_exclude"`
nestedfields filter.Filter
}
func (s *Serializer) Init() error {
// Default precision is 1s
if s.TimestampUnits <= 0 {
s.TimestampUnits = config.Duration(time.Second)
}
// Search for the power of ten less than the duration
d := time.Nanosecond
t := time.Duration(s.TimestampUnits)
for {
if d*10 > t {
t = d
break
}
d = d * 10
}
s.TimestampUnits = config.Duration(t)
if len(s.NestedFieldsInclude) > 0 || len(s.NestedFieldsExclude) > 0 {
f, err := filter.NewIncludeExcludeFilter(s.NestedFieldsInclude, s.NestedFieldsExclude)
if err != nil {
return err
}
s.nestedfields = f
}
return nil
}
func (s *Serializer) Serialize(metric telegraf.Metric) ([]byte, error) {
var obj interface{}
obj = s.createObject(metric)
if s.Transformation != "" {
var err error
if obj, err = s.transform(obj); err != nil {
if errors.Is(err, jsonata.ErrUndefined) {
return nil, fmt.Errorf("%w (maybe configured for batch mode?)", err)
}
return nil, err
}
}
serialized, err := json.Marshal(obj)
if err != nil {
return nil, err
}
serialized = append(serialized, '\n')
return serialized, nil
}
func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) {
objects := make([]interface{}, 0, len(metrics))
for _, metric := range metrics {
m := s.createObject(metric)
objects = append(objects, m)
}
var obj interface{}
obj = map[string]interface{}{
"metrics": objects,
}
if s.Transformation != "" {
var err error
if obj, err = s.transform(obj); err != nil {
if errors.Is(err, jsonata.ErrUndefined) {
return nil, fmt.Errorf("%w (maybe configured for non-batch mode?)", err)
}
return nil, err
}
}
serialized, err := json.Marshal(obj)
if err != nil {
return nil, err
}
serialized = append(serialized, '\n')
return serialized, nil
}
func (s *Serializer) createObject(metric telegraf.Metric) map[string]interface{} {
m := make(map[string]interface{}, 4)
tags := make(map[string]string, len(metric.TagList()))
for _, tag := range metric.TagList() {
tags[tag.Key] = tag.Value
}
m["tags"] = tags
fields := make(map[string]interface{}, len(metric.FieldList()))
for _, field := range metric.FieldList() {
val := field.Value
switch fv := field.Value.(type) {
case float64:
// JSON does not support these special values
if math.IsNaN(fv) || math.IsInf(fv, 0) {
continue
}
case string:
// Check for nested fields if any
if s.nestedfields != nil && s.nestedfields.Match(field.Key) {
bv := []byte(fv)
if json.Valid(bv) {
var nested interface{}
if err := json.Unmarshal(bv, &nested); err == nil {
val = nested
}
}
}
}
fields[field.Key] = val
}
m["fields"] = fields
m["name"] = metric.Name()
if s.TimestampFormat == "" {
m["timestamp"] = metric.Time().UnixNano() / int64(s.TimestampUnits)
} else {
m["timestamp"] = metric.Time().UTC().Format(s.TimestampFormat)
}
return m
}
func (s *Serializer) transform(obj interface{}) (interface{}, error) {
transformation, err := jsonata.Compile(s.Transformation)
if err != nil {
return nil, err
}
return transformation.Eval(obj)
}
func init() {
serializers.Add("json",
func() telegraf.Serializer {
return &Serializer{}
},
)
}

View file

@ -0,0 +1,514 @@
package json
import (
"encoding/json"
"fmt"
"math"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/influxdata/toml"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/plugins/parsers/influx"
"github.com/influxdata/telegraf/plugins/serializers"
"github.com/influxdata/telegraf/testutil"
)
func TestSerializeMetricFloat(t *testing.T) {
now := time.Now()
tags := map[string]string{
"cpu": "cpu0",
}
fields := map[string]interface{}{
"usage_idle": float64(91.5),
}
m := metric.New("cpu", tags, fields, now)
s := Serializer{}
require.NoError(t, s.Init())
buf, err := s.Serialize(m)
require.NoError(t, err)
expS := []byte(fmt.Sprintf(`{"fields":{"usage_idle":91.5},"name":"cpu","tags":{"cpu":"cpu0"},"timestamp":%d}`, now.Unix()) + "\n")
require.Equal(t, string(expS), string(buf))
}
func TestSerialize_TimestampUnits(t *testing.T) {
tests := []struct {
name string
timestampUnits time.Duration
timestampFormat string
expected string
}{
{
name: "default of 1s",
timestampUnits: 0,
expected: `{"fields":{"value":42},"name":"cpu","tags":{},"timestamp":1525478795}`,
},
{
name: "1ns",
timestampUnits: 1 * time.Nanosecond,
expected: `{"fields":{"value":42},"name":"cpu","tags":{},"timestamp":1525478795123456789}`,
},
{
name: "1ms",
timestampUnits: 1 * time.Millisecond,
expected: `{"fields":{"value":42},"name":"cpu","tags":{},"timestamp":1525478795123}`,
},
{
name: "10ms",
timestampUnits: 10 * time.Millisecond,
expected: `{"fields":{"value":42},"name":"cpu","tags":{},"timestamp":152547879512}`,
},
{
name: "15ms is reduced to 10ms",
timestampUnits: 15 * time.Millisecond,
expected: `{"fields":{"value":42},"name":"cpu","tags":{},"timestamp":152547879512}`,
},
{
name: "65ms is reduced to 10ms",
timestampUnits: 65 * time.Millisecond,
expected: `{"fields":{"value":42},"name":"cpu","tags":{},"timestamp":152547879512}`,
},
{
name: "timestamp format",
timestampFormat: "2006-01-02T15:04:05Z07:00",
expected: `{"fields":{"value":42},"name":"cpu","tags":{},"timestamp":"2018-05-05T00:06:35Z"}`,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
m := metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": 42.0,
},
time.Unix(1525478795, 123456789),
)
s := Serializer{
TimestampUnits: config.Duration(tt.timestampUnits),
TimestampFormat: tt.timestampFormat,
}
require.NoError(t, s.Init())
actual, err := s.Serialize(m)
require.NoError(t, err)
require.Equal(t, tt.expected+"\n", string(actual))
})
}
}
func TestSerializeMetricInt(t *testing.T) {
now := time.Now()
tags := map[string]string{
"cpu": "cpu0",
}
fields := map[string]interface{}{
"usage_idle": int64(90),
}
m := metric.New("cpu", tags, fields, now)
s := Serializer{}
require.NoError(t, s.Init())
buf, err := s.Serialize(m)
require.NoError(t, err)
expS := []byte(fmt.Sprintf(`{"fields":{"usage_idle":90},"name":"cpu","tags":{"cpu":"cpu0"},"timestamp":%d}`, now.Unix()) + "\n")
require.Equal(t, string(expS), string(buf))
}
func TestSerializeMetricString(t *testing.T) {
now := time.Now()
tags := map[string]string{
"cpu": "cpu0",
}
fields := map[string]interface{}{
"usage_idle": "foobar",
}
m := metric.New("cpu", tags, fields, now)
s := Serializer{}
require.NoError(t, s.Init())
buf, err := s.Serialize(m)
require.NoError(t, err)
expS := []byte(fmt.Sprintf(`{"fields":{"usage_idle":"foobar"},"name":"cpu","tags":{"cpu":"cpu0"},"timestamp":%d}`, now.Unix()) + "\n")
require.Equal(t, string(expS), string(buf))
}
func TestSerializeMultiFields(t *testing.T) {
now := time.Now()
tags := map[string]string{
"cpu": "cpu0",
}
fields := map[string]interface{}{
"usage_idle": int64(90),
"usage_total": 8559615,
}
m := metric.New("cpu", tags, fields, now)
s := Serializer{}
require.NoError(t, s.Init())
buf, err := s.Serialize(m)
require.NoError(t, err)
expS := []byte(fmt.Sprintf(`{"fields":{"usage_idle":90,"usage_total":8559615},"name":"cpu","tags":{"cpu":"cpu0"},"timestamp":%d}`, now.Unix()) + "\n")
require.Equal(t, string(expS), string(buf))
}
func TestSerializeMetricWithEscapes(t *testing.T) {
now := time.Now()
tags := map[string]string{
"cpu tag": "cpu0",
}
fields := map[string]interface{}{
"U,age=Idle": int64(90),
}
m := metric.New("My CPU", tags, fields, now)
s := Serializer{}
require.NoError(t, s.Init())
buf, err := s.Serialize(m)
require.NoError(t, err)
expS := []byte(fmt.Sprintf(`{"fields":{"U,age=Idle":90},"name":"My CPU","tags":{"cpu tag":"cpu0"},"timestamp":%d}`, now.Unix()) + "\n")
require.Equal(t, string(expS), string(buf))
}
func TestSerializeBatch(t *testing.T) {
m := metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": 42.0,
},
time.Unix(0, 0),
)
metrics := []telegraf.Metric{m, m}
s := Serializer{}
require.NoError(t, s.Init())
buf, err := s.SerializeBatch(metrics)
require.NoError(t, err)
require.JSONEq(
t,
`{"metrics":[{"fields":{"value":42},"name":"cpu","tags":{},"timestamp":0},{"fields":{"value":42},"name":"cpu","tags":{},"timestamp":0}]}`,
string(buf),
)
}
func TestSerializeBatchSkipInf(t *testing.T) {
metrics := []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{
"inf": math.Inf(1),
"time_idle": 42,
},
time.Unix(0, 0),
),
}
s := Serializer{}
require.NoError(t, s.Init())
buf, err := s.SerializeBatch(metrics)
require.NoError(t, err)
require.JSONEq(t, `{"metrics":[{"fields":{"time_idle":42},"name":"cpu","tags":{},"timestamp":0}]}`, string(buf))
}
func TestSerializeBatchSkipInfAllFields(t *testing.T) {
metrics := []telegraf.Metric{
testutil.MustMetric(
"cpu",
map[string]string{},
map[string]interface{}{
"inf": math.Inf(1),
},
time.Unix(0, 0),
),
}
s := Serializer{}
require.NoError(t, s.Init())
buf, err := s.SerializeBatch(metrics)
require.NoError(t, err)
require.JSONEq(t, `{"metrics":[{"fields":{},"name":"cpu","tags":{},"timestamp":0}]}`, string(buf))
}
func TestSerializeTransformationNonBatch(t *testing.T) {
var tests = []struct {
name string
filename string
}{
{
name: "non-batch transformation test",
filename: "testcases/transformation_single.conf",
},
}
parser := &influx.Parser{}
require.NoError(t, parser.Init())
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
filename := filepath.FromSlash(tt.filename)
cfg, header, err := loadTestConfiguration(filename)
require.NoError(t, err)
// Get the input metrics
metrics, err := testutil.ParseMetricsFrom(header, "Input:", parser)
require.NoError(t, err)
// Get the expectations
expectedArray, err := loadJSON(strings.TrimSuffix(filename, ".conf") + "_out.json")
require.NoError(t, err)
expected := expectedArray.([]interface{})
// Serialize
serializer := Serializer{
TimestampUnits: config.Duration(cfg.TimestampUnits),
TimestampFormat: cfg.TimestampFormat,
Transformation: cfg.Transformation,
}
require.NoError(t, serializer.Init())
for i, m := range metrics {
buf, err := serializer.Serialize(m)
require.NoError(t, err)
// Compare
var actual interface{}
require.NoError(t, json.Unmarshal(buf, &actual))
require.EqualValuesf(t, expected[i], actual, "mismatch in %d", i)
}
})
}
}
func TestSerializeTransformationBatch(t *testing.T) {
var tests = []struct {
name string
filename string
}{
{
name: "batch transformation test",
filename: "testcases/transformation_batch.conf",
},
}
parser := &influx.Parser{}
require.NoError(t, parser.Init())
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
filename := filepath.FromSlash(tt.filename)
cfg, header, err := loadTestConfiguration(filename)
require.NoError(t, err)
// Get the input metrics
metrics, err := testutil.ParseMetricsFrom(header, "Input:", parser)
require.NoError(t, err)
// Get the expectations
expected, err := loadJSON(strings.TrimSuffix(filename, ".conf") + "_out.json")
require.NoError(t, err)
// Serialize
serializer := Serializer{
TimestampUnits: config.Duration(cfg.TimestampUnits),
TimestampFormat: cfg.TimestampFormat,
Transformation: cfg.Transformation,
}
require.NoError(t, serializer.Init())
buf, err := serializer.SerializeBatch(metrics)
require.NoError(t, err)
// Compare
var actual interface{}
require.NoError(t, json.Unmarshal(buf, &actual))
require.EqualValues(t, expected, actual)
})
}
}
func TestSerializeTransformationIssue12734(t *testing.T) {
input := []telegraf.Metric{
metric.New(
"data",
map[string]string{"key": "a"},
map[string]interface{}{"value": 10.1},
time.Unix(0, 1676285135457000000),
),
metric.New(
"data",
map[string]string{"key": "b"},
map[string]interface{}{"value": 20.2},
time.Unix(0, 1676285135457000000),
),
metric.New(
"data",
map[string]string{"key": "c"},
map[string]interface{}{"value": 30.3},
time.Unix(0, 1676285135457000000),
),
}
transformation := `
{
"valueRows": metrics{$string(timestamp): fields.value[]} ~> $each(function($v, $k) {
{
"timestamp": $number($k),
"values": $v
}
})
}
`
expected := map[string]interface{}{
"valueRows": map[string]interface{}{
"timestamp": 1.676285135e+9,
"values": []interface{}{10.1, 20.2, 30.3},
},
}
// Setup serializer
serializer := Serializer{
Transformation: transformation,
}
require.NoError(t, serializer.Init())
// Check multiple serializations as issue #12734 shows that the
// transformation breaks after the first iteration
for i := 1; i <= 3; i++ {
buf, err := serializer.SerializeBatch(input)
require.NoErrorf(t, err, "broke in iteration %d", i)
// Compare
var actual interface{}
require.NoError(t, json.Unmarshal(buf, &actual))
require.EqualValuesf(t, expected, actual, "broke in iteration %d", i)
}
}
func TestSerializeNesting(t *testing.T) {
var tests = []struct {
name string
filename string
out string
}{
{
name: "nested fields include",
filename: "testcases/nested_fields_include.conf",
out: "testcases/nested_fields_out.json",
},
{
name: "nested fields exclude",
filename: "testcases/nested_fields_exclude.conf",
out: "testcases/nested_fields_out.json",
},
}
parser := &influx.Parser{}
require.NoError(t, parser.Init())
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
filename := filepath.FromSlash(tt.filename)
cfg, header, err := loadTestConfiguration(filename)
require.NoError(t, err)
// Get the input metrics
metrics, err := testutil.ParseMetricsFrom(header, "Input:", parser)
require.NoError(t, err)
require.Len(t, metrics, 1)
// Get the expectations
expectedArray, err := loadJSON(tt.out)
require.NoError(t, err)
expected := expectedArray.(map[string]interface{})
// Serialize
serializer := Serializer{
TimestampUnits: config.Duration(cfg.TimestampUnits),
TimestampFormat: cfg.TimestampFormat,
Transformation: cfg.Transformation,
NestedFieldsInclude: cfg.JSONNestedFieldsInclude,
NestedFieldsExclude: cfg.JSONNestedFieldsExclude,
}
require.NoError(t, serializer.Init())
buf, err := serializer.Serialize(metrics[0])
require.NoError(t, err)
// Compare
var actual interface{}
require.NoError(t, json.Unmarshal(buf, &actual))
require.EqualValues(t, expected, actual)
})
}
}
type Config struct {
TimestampUnits time.Duration `toml:"json_timestamp_units"`
TimestampFormat string `toml:"json_timestamp_format"`
Transformation string `toml:"json_transformation"`
JSONNestedFieldsInclude []string `toml:"json_nested_fields_include"`
JSONNestedFieldsExclude []string `toml:"json_nested_fields_exclude"`
}
func loadTestConfiguration(filename string) (*Config, []string, error) {
buf, err := os.ReadFile(filename)
if err != nil {
return nil, nil, err
}
header := make([]string, 0)
for _, line := range strings.Split(string(buf), "\n") {
line = strings.TrimSpace(line)
if strings.HasPrefix(line, "#") {
header = append(header, line)
}
}
var cfg Config
err = toml.Unmarshal(buf, &cfg)
return &cfg, header, err
}
func loadJSON(filename string) (interface{}, error) {
buf, err := os.ReadFile(filename)
if err != nil {
return nil, err
}
var data interface{}
err = json.Unmarshal(buf, &data)
return data, err
}
func BenchmarkSerialize(b *testing.B) {
s := &Serializer{}
require.NoError(b, s.Init())
metrics := serializers.BenchmarkMetrics(b)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := s.Serialize(metrics[i%len(metrics)])
require.NoError(b, err)
}
}
func BenchmarkSerializeBatch(b *testing.B) {
s := &Serializer{}
require.NoError(b, s.Init())
m := serializers.BenchmarkMetrics(b)
metrics := m[:]
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := s.SerializeBatch(metrics)
require.NoError(b, err)
}
}

View file

@ -0,0 +1,6 @@
# Example for decoding fields that contain nested JSON structures.
#
# Input:
# in,host=myhost,type=diagnostic hops=10,latency=1.23,id-1234="{\"address\": \"AB1A\", \"status\": \"online\"}",id-0000="{\"status\": \"offline\"}",id-5678="{\"address\": \"0000\", \"status\": \"online\"}" 1666006350000000000
json_nested_fields_exclude = ["hops", "latency"]

View file

@ -0,0 +1,6 @@
# Example for decoding fields that contain nested JSON structures.
#
# Input:
# in,host=myhost,type=diagnostic hops=10,latency=1.23,id-1234="{\"address\": \"AB1A\", \"status\": \"online\"}",id-0000="{\"status\": \"offline\"}",id-5678="{\"address\": \"0000\", \"status\": \"online\"}" 1666006350000000000
json_nested_fields_include = ["id-*"]

View file

@ -0,0 +1,23 @@
{
"fields": {
"id-1234": {
"address": "AB1A",
"status": "online"
},
"id-0000": {
"status": "offline"
},
"id-5678": {
"address": "0000",
"status": "online"
},
"hops": 10,
"latency": 1.23
},
"name": "in",
"tags": {
"host": "myhost",
"type": "diagnostic"
},
"timestamp": 1666006350
}

View file

@ -0,0 +1,24 @@
# Example for transforming the output JSON with batch metrics.
#
# Input:
# impression,flagname=F5,host=1cbbb3796fc2,key=12345,platform=Java,sdkver=4.9.1,value=false count_sum=5i 1653643420000000000
# expression,flagname=E42,host=klaus,key=67890,platform=Golang,sdkver=1.18.3,value=true count_sum=42i 1653646789000000000
json_transformation = '''
metrics.{
"sdkVersion": tags.sdkver,
"time": timestamp,
"platform": platform,
"key": tags.key,
"events": [
{
"time": timestamp,
"flag": tags.flagname,
"experimentVersion": 0,
"value": tags.value,
"type": $uppercase(name),
"count": fields.count_sum
}
]
}
'''

View file

@ -0,0 +1,32 @@
[
{
"sdkVersion": "4.9.1",
"time": 1653643420,
"key": "12345",
"events": [
{
"time": 1653643420,
"flag": "F5",
"experimentVersion": 0,
"value": "false",
"type": "IMPRESSION",
"count": 5
}
]
},
{
"sdkVersion": "1.18.3",
"time": 1653646789,
"key": "67890",
"events": [
{
"time": 1653646789,
"flag": "E42",
"experimentVersion": 0,
"value": "true",
"type": "EXPRESSION",
"count": 42
}
]
}
]

View file

@ -0,0 +1,24 @@
# Example for transforming the output JSON in non-batch mode.
#
# Input:
# impression,flagname=F5,host=1cbbb3796fc2,key=12345,platform=Java,sdkver=4.9.1,value=false count_sum=5i 1653643420000000000
# expression,flagname=E42,host=klaus,key=67890,platform=Golang,sdkver=1.18.3,value=true count_sum=42i 1653646789000000000
json_transformation = '''
{
"sdkVersion": tags.sdkver,
"time": timestamp,
"platform": platform,
"key": tags.key,
"events": [
{
"time": timestamp,
"flag": tags.flagname,
"experimentVersion": 0,
"value": tags.value,
"type": $uppercase(name),
"count": fields.count_sum
}
]
}
'''

View file

@ -0,0 +1,32 @@
[
{
"sdkVersion": "4.9.1",
"time": 1653643420,
"key": "12345",
"events": [
{
"time": 1653643420,
"flag": "F5",
"experimentVersion": 0,
"value": "false",
"type": "IMPRESSION",
"count": 5
}
]
},
{
"sdkVersion": "1.18.3",
"time": 1653646789,
"key": "67890",
"events": [
{
"time": 1653646789,
"flag": "E42",
"experimentVersion": 0,
"value": "true",
"type": "EXPRESSION",
"count": 42
}
]
}
]

View file

@ -0,0 +1,43 @@
# MessagePack
[MessagePack](https://msgpack.org) is an efficient binary serialization format. It lets you exchange data among multiple languages like JSON.
## Format Definitions
The output of this format is the MessagePack binary representation of metrics, structured identically to the JSON below.
```json
{
"name":"cpu",
"time": <TIMESTAMP>, // https://github.com/msgpack/msgpack/blob/master/spec.md#timestamp-extension-type
"tags":{
"tag_1":"host01",
...
},
"fields":{
"field_1":30,
"field_2":true,
"field_3":"field_value"
"field_4":30.1
...
}
}
```
MessagePack has its own timestamp representation. You can find additional information in the [MessagePack specification](https://github.com/msgpack/msgpack/blob/master/spec.md#timestamp-extension-type).
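For example, here are the timestamp encodings produced for a few representative instants (hex values taken from this plugin's edge-case tests):
```text
1970-01-01T00:00:00Z           -> d6ff00000000                   (32-bit)
1970-01-01T00:00:00.000000001Z -> d7ff0000000400000000           (64-bit)
2514-05-30T01:53:04Z           -> c70cff000000000000000400000000 (96-bit)
```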
## MessagePack Configuration
There are no additional configuration options for the MessagePack format.
```toml
[[outputs.file]]
## Files to write to, "stdout" is a specially handled file.
files = ["stdout", "/tmp/metrics.out"]
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "msgpack"
```
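Since every metric is serialized as a single self-contained MessagePack object, a batch is simply the concatenation of such objects. Below is a minimal decoding sketch; it assumes it runs inside this serializer package so that the generated `Metric` type is in scope, and the path and function name are illustrative only:
```go
package msgpack

import (
	"fmt"
	"os"
)

// decodeStream reads back metrics written with data_format = "msgpack".
// Metric.UnmarshalMsg consumes one metric and returns the remaining bytes,
// so concatenated metrics decode naturally in a loop.
func decodeStream(path string) error {
	buf, err := os.ReadFile(path)
	if err != nil {
		return err
	}
	for len(buf) > 0 {
		var m Metric
		if buf, err = m.UnmarshalMsg(buf); err != nil {
			return err
		}
		fmt.Println(m.Name, m.Tags, m.Fields)
	}
	return nil
}
```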

View file

@ -0,0 +1,104 @@
package msgpack
import (
"encoding/binary"
"time"
"github.com/tinylib/msgp/msgp"
)
//go:generate msgp
// Metric is structure to define MessagePack message format
// will be used by msgp code generator
type Metric struct {
Name string `msg:"name"`
Time MessagePackTime `msg:"time,extension"`
Tags map[string]string `msg:"tags"`
Fields map[string]interface{} `msg:"fields"`
}
// MessagePackTime implements the official timestamp extension type
// https://github.com/msgpack/msgpack/blob/master/spec.md#timestamp-extension-type
//
// tinylib/msgp uses its own custom extension type and the official timestamp
// extension is not available (https://github.com/tinylib/msgp/issues/214)
type MessagePackTime struct {
time time.Time
}
func init() {
msgp.RegisterExtension(-1, func() msgp.Extension { return new(MessagePackTime) })
}
// ExtensionType implements the Extension interface
func (*MessagePackTime) ExtensionType() int8 {
return -1
}
// Len implements the Extension interface
// The timestamp extension uses variable-length encoding depending on the input
//
// 32bits: [1970-01-01 00:00:00 UTC, 2106-02-07 06:28:16 UTC) range, if the nanoseconds part is 0
// 64bits: [1970-01-01 00:00:00.000000000 UTC, 2514-05-30 01:53:04.000000000 UTC) range.
// 96bits: [-584554047284-02-23 16:59:44 UTC, 584554051223-11-09 07:00:16.000000000 UTC) range.
func (z *MessagePackTime) Len() int {
sec := z.time.Unix()
nsec := z.time.Nanosecond()
if sec < 0 || sec >= (1<<34) { // 96 bits encoding
return 12
}
if sec >= (1<<32) || nsec != 0 {
return 8
}
return 4
}
// MarshalBinaryTo implements the Extension interface
func (z *MessagePackTime) MarshalBinaryTo(buf []byte) error {
length := z.Len()
if length == 4 {
sec := z.time.Unix()
binary.BigEndian.PutUint32(buf, uint32(sec))
} else if length == 8 {
sec := z.time.Unix()
nsec := z.time.Nanosecond()
data := uint64(nsec)<<34 | (uint64(sec) & 0x03_ffff_ffff)
binary.BigEndian.PutUint64(buf, data)
} else if length == 12 {
sec := z.time.Unix()
nsec := z.time.Nanosecond()
binary.BigEndian.PutUint32(buf, uint32(nsec))
binary.BigEndian.PutUint64(buf[4:], uint64(sec))
}
return nil
}
// UnmarshalBinary implements the Extension interface
func (z *MessagePackTime) UnmarshalBinary(buf []byte) error {
length := len(buf)
if length == 4 {
sec := binary.BigEndian.Uint32(buf)
z.time = time.Unix(int64(sec), 0)
} else if length == 8 {
data := binary.BigEndian.Uint64(buf)
nsec := (data & 0xfffffffc_00000000) >> 34
sec := data & 0x00000003_ffffffff
z.time = time.Unix(int64(sec), int64(nsec))
} else if length == 12 {
nsec := binary.BigEndian.Uint32(buf)
sec := binary.BigEndian.Uint64(buf[4:])
z.time = time.Unix(int64(sec), int64(nsec))
}
return nil
}

View file

@ -0,0 +1,417 @@
package msgpack
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *MessagePackTime) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z MessagePackTime) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 0
err = en.Append(0x80)
if err != nil {
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z MessagePackTime) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 0
o = append(o, 0x80)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *MessagePackTime) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z MessagePackTime) Msgsize() (s int) {
s = 1
return
}
// DecodeMsg implements msgp.Decodable
func (z *Metric) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "name":
z.Name, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Name")
return
}
case "time":
err = dc.ReadExtension(&z.Time)
if err != nil {
err = msgp.WrapError(err, "Time")
return
}
case "tags":
var zb0002 uint32
zb0002, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "Tags")
return
}
if z.Tags == nil {
z.Tags = make(map[string]string, zb0002)
} else if len(z.Tags) > 0 {
for key := range z.Tags {
delete(z.Tags, key)
}
}
for zb0002 > 0 {
zb0002--
var za0001 string
var za0002 string
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Tags")
return
}
za0002, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Tags", za0001)
return
}
z.Tags[za0001] = za0002
}
case "fields":
var zb0003 uint32
zb0003, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "Fields")
return
}
if z.Fields == nil {
z.Fields = make(map[string]interface{}, zb0003)
} else if len(z.Fields) > 0 {
for key := range z.Fields {
delete(z.Fields, key)
}
}
for zb0003 > 0 {
zb0003--
var za0003 string
var za0004 interface{}
za0003, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Fields")
return
}
za0004, err = dc.ReadIntf()
if err != nil {
err = msgp.WrapError(err, "Fields", za0003)
return
}
z.Fields[za0003] = za0004
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *Metric) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 4
// write "name"
err = en.Append(0x84, 0xa4, 0x6e, 0x61, 0x6d, 0x65)
if err != nil {
return
}
err = en.WriteString(z.Name)
if err != nil {
err = msgp.WrapError(err, "Name")
return
}
// write "time"
err = en.Append(0xa4, 0x74, 0x69, 0x6d, 0x65)
if err != nil {
return
}
err = en.WriteExtension(&z.Time)
if err != nil {
err = msgp.WrapError(err, "Time")
return
}
// write "tags"
err = en.Append(0xa4, 0x74, 0x61, 0x67, 0x73)
if err != nil {
return
}
err = en.WriteMapHeader(uint32(len(z.Tags)))
if err != nil {
err = msgp.WrapError(err, "Tags")
return
}
for za0001, za0002 := range z.Tags {
err = en.WriteString(za0001)
if err != nil {
err = msgp.WrapError(err, "Tags")
return
}
err = en.WriteString(za0002)
if err != nil {
err = msgp.WrapError(err, "Tags", za0001)
return
}
}
// write "fields"
err = en.Append(0xa6, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73)
if err != nil {
return
}
err = en.WriteMapHeader(uint32(len(z.Fields)))
if err != nil {
err = msgp.WrapError(err, "Fields")
return
}
for za0003, za0004 := range z.Fields {
err = en.WriteString(za0003)
if err != nil {
err = msgp.WrapError(err, "Fields")
return
}
err = en.WriteIntf(za0004)
if err != nil {
err = msgp.WrapError(err, "Fields", za0003)
return
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *Metric) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 4
// string "name"
o = append(o, 0x84, 0xa4, 0x6e, 0x61, 0x6d, 0x65)
o = msgp.AppendString(o, z.Name)
// string "time"
o = append(o, 0xa4, 0x74, 0x69, 0x6d, 0x65)
o, err = msgp.AppendExtension(o, &z.Time)
if err != nil {
err = msgp.WrapError(err, "Time")
return
}
// string "tags"
o = append(o, 0xa4, 0x74, 0x61, 0x67, 0x73)
o = msgp.AppendMapHeader(o, uint32(len(z.Tags)))
for za0001, za0002 := range z.Tags {
o = msgp.AppendString(o, za0001)
o = msgp.AppendString(o, za0002)
}
// string "fields"
o = append(o, 0xa6, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73)
o = msgp.AppendMapHeader(o, uint32(len(z.Fields)))
for za0003, za0004 := range z.Fields {
o = msgp.AppendString(o, za0003)
o, err = msgp.AppendIntf(o, za0004)
if err != nil {
err = msgp.WrapError(err, "Fields", za0003)
return
}
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *Metric) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "name":
z.Name, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Name")
return
}
case "time":
bts, err = msgp.ReadExtensionBytes(bts, &z.Time)
if err != nil {
err = msgp.WrapError(err, "Time")
return
}
case "tags":
var zb0002 uint32
zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Tags")
return
}
if z.Tags == nil {
z.Tags = make(map[string]string, zb0002)
} else if len(z.Tags) > 0 {
for key := range z.Tags {
delete(z.Tags, key)
}
}
for zb0002 > 0 {
var za0001 string
var za0002 string
zb0002--
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Tags")
return
}
za0002, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Tags", za0001)
return
}
z.Tags[za0001] = za0002
}
case "fields":
var zb0003 uint32
zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Fields")
return
}
if z.Fields == nil {
z.Fields = make(map[string]interface{}, zb0003)
} else if len(z.Fields) > 0 {
for key := range z.Fields {
delete(z.Fields, key)
}
}
for zb0003 > 0 {
var za0003 string
var za0004 interface{}
zb0003--
za0003, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Fields")
return
}
za0004, bts, err = msgp.ReadIntfBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Fields", za0003)
return
}
z.Fields[za0003] = za0004
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *Metric) Msgsize() (s int) {
s = 1 + 5 + msgp.StringPrefixSize + len(z.Name) + 5 + msgp.ExtensionPrefixSize + z.Time.Len() + 5 + msgp.MapHeaderSize
if z.Tags != nil {
for za0001, za0002 := range z.Tags {
_ = za0002
s += msgp.StringPrefixSize + len(za0001) + msgp.StringPrefixSize + len(za0002)
}
}
s += 7 + msgp.MapHeaderSize
if z.Fields != nil {
for za0003, za0004 := range z.Fields {
_ = za0004
s += msgp.StringPrefixSize + len(za0003) + msgp.GuessSize(za0004)
}
}
return
}

View file

@ -0,0 +1,236 @@
package msgpack
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"
"github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshalMessagePackTime(t *testing.T) {
v := MessagePackTime{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgMessagePackTime(b *testing.B) {
v := MessagePackTime{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgMessagePackTime(b *testing.B) {
v := MessagePackTime{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalMessagePackTime(b *testing.B) {
v := MessagePackTime{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeMessagePackTime(t *testing.T) {
v := MessagePackTime{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeMessagePackTime Msgsize() is inaccurate")
}
vn := MessagePackTime{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeMessagePackTime(b *testing.B) {
v := MessagePackTime{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeMessagePackTime(b *testing.B) {
v := MessagePackTime{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalMetric(t *testing.T) {
v := Metric{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgMetric(b *testing.B) {
v := Metric{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgMetric(b *testing.B) {
v := Metric{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalMetric(b *testing.B) {
v := Metric{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeMetric(t *testing.T) {
v := Metric{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeMetric Msgsize() is inaccurate")
}
vn := Metric{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeMetric(b *testing.B) {
v := Metric{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeMetric(b *testing.B) {
v := Metric{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}

View file

@ -0,0 +1,162 @@
package msgpack
import (
"encoding/hex"
"math"
"testing"
"time"
"github.com/stretchr/testify/require"
)
func TestMsgPackTime32(t *testing.T) {
// Maximum of 4 bytes encodable time
var sec int64 = 0xFFFFFFFF
var nsec int64
t1 := MessagePackTime{time: time.Unix(sec, nsec)}
require.Equal(t, 4, t1.Len())
buf := make([]byte, t1.Len())
require.NoError(t, t1.MarshalBinaryTo(buf))
t2 := new(MessagePackTime)
err := t2.UnmarshalBinary(buf)
require.NoError(t, err)
require.Equal(t, t1.time, t2.time)
}
func TestMsgPackTime64(t *testing.T) {
// Maximum of 8 bytes encodable time
var sec int64 = 0x3FFFFFFFF
var nsec int64 = 999999999
t1 := MessagePackTime{time: time.Unix(sec, nsec)}
require.Equal(t, 8, t1.Len())
buf := make([]byte, t1.Len())
require.NoError(t, t1.MarshalBinaryTo(buf))
t2 := new(MessagePackTime)
err := t2.UnmarshalBinary(buf)
require.NoError(t, err)
require.Equal(t, t1.time, t2.time)
}
func TestMsgPackTime96(t *testing.T) {
// Testing 12 bytes timestamp
var sec int64 = 0x400000001
var nsec int64 = 111111111
t1 := MessagePackTime{time: time.Unix(sec, nsec)}
require.Equal(t, 12, t1.Len())
buf := make([]byte, t1.Len())
require.NoError(t, t1.MarshalBinaryTo(buf))
t2 := new(MessagePackTime)
err := t2.UnmarshalBinary(buf)
require.NoError(t, err)
require.True(t, t1.time.Equal(t2.time))
// Testing the default value: 0001-01-01T00:00:00Z
t1 = MessagePackTime{}
require.Equal(t, 12, t1.Len())
require.NoError(t, t1.MarshalBinaryTo(buf))
t2 = new(MessagePackTime)
err = t2.UnmarshalBinary(buf)
require.NoError(t, err)
require.True(t, t1.time.Equal(t2.time))
}
func TestMsgPackTimeEdgeCases(t *testing.T) {
times := make([]time.Time, 0)
expected := make([][]byte, 0)
// Unix epoch. Begin of 4bytes dates
// Nanoseconds: 0x00000000, Seconds: 0x0000000000000000
ts, err := time.Parse(time.RFC3339, "1970-01-01T00:00:00Z")
require.NoError(t, err)
bs, err := hex.DecodeString("d6ff00000000")
require.NoError(t, err)
times = append(times, ts)
expected = append(expected, bs)
// End of 4bytes dates
// Nanoseconds: 0x00000000, Seconds: 0x00000000ffffffff
ts, err = time.Parse(time.RFC3339, "2106-02-07T06:28:15Z")
require.NoError(t, err)
bs, err = hex.DecodeString("d6ffffffffff")
require.NoError(t, err)
times = append(times, ts)
expected = append(expected, bs)
// Begin of 8bytes dates
// Nanoseconds: 0x00000000, Seconds: 0x0000000100000000
ts, err = time.Parse(time.RFC3339, "2106-02-07T06:28:16Z")
require.NoError(t, err)
bs, err = hex.DecodeString("d7ff0000000100000000")
require.NoError(t, err)
times = append(times, ts)
expected = append(expected, bs)
// Just after Unix epoch. Non zero nanoseconds
// Nanoseconds: 0x00000001, Seconds: 0x0000000000000000
ts, err = time.Parse(time.RFC3339Nano, "1970-01-01T00:00:00.000000001Z")
require.NoError(t, err)
bs, err = hex.DecodeString("d7ff0000000400000000")
require.NoError(t, err)
times = append(times, ts)
expected = append(expected, bs)
// End of 8bytes dates
// Nanoseconds: 0x00000000, Seconds: 0x00000003ffffffff
ts, err = time.Parse(time.RFC3339Nano, "2514-05-30T01:53:03.000000000Z")
require.NoError(t, err)
bs, err = hex.DecodeString("d7ff00000003ffffffff")
require.NoError(t, err)
times = append(times, ts)
expected = append(expected, bs)
// Begin of 12bytes date
// Nanoseconds: 0x00000000, Seconds: 0x0000000400000000
ts, err = time.Parse(time.RFC3339Nano, "2514-05-30T01:53:04.000000000Z")
require.NoError(t, err)
bs, err = hex.DecodeString("c70cff000000000000000400000000")
require.NoError(t, err)
times = append(times, ts)
expected = append(expected, bs)
// Zero value, 0001-01-01T00:00:00Z
// Nanoseconds: 0x00000000, Seconds: 0xfffffff1886e0900
ts = time.Time{}
bs, err = hex.DecodeString("c70cff00000000fffffff1886e0900")
require.NoError(t, err)
times = append(times, ts)
expected = append(expected, bs)
// Max value
// Nanoseconds: 0x3b9ac9ff, Seconds: 0x7fffffffffffffff
ts = time.Unix(math.MaxInt64, 999_999_999).UTC()
bs, err = hex.DecodeString("c70cff3b9ac9ff7fffffffffffffff")
require.NoError(t, err)
times = append(times, ts)
expected = append(expected, bs)
buf := make([]byte, 0)
for i, ts := range times {
t1 := MessagePackTime{time: ts}
m := Metric{Time: t1}
buf = buf[:0]
buf, err = m.MarshalMsg(buf)
require.NoError(t, err)
require.Equal(t, expected[i], buf[12:len(buf)-14])
}
}

View file

@ -0,0 +1,47 @@
package msgpack
import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/serializers"
)
// Serializer encodes metrics in MessagePack format
type Serializer struct{}
func marshalMetric(buf []byte, metric telegraf.Metric) ([]byte, error) {
return (&Metric{
Name: metric.Name(),
Time: MessagePackTime{time: metric.Time()},
Tags: metric.Tags(),
Fields: metric.Fields(),
}).MarshalMsg(buf)
}
// Serialize implements serializers.Serializer.Serialize
// github.com/influxdata/telegraf/plugins/serializers/Serializer
func (*Serializer) Serialize(metric telegraf.Metric) ([]byte, error) {
return marshalMetric(nil, metric)
}
// SerializeBatch implements serializers.Serializer.SerializeBatch
// github.com/influxdata/telegraf/plugins/serializers/Serializer
func (*Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) {
buf := make([]byte, 0)
for _, m := range metrics {
var err error
buf, err = marshalMetric(buf, m)
if err != nil {
return nil, err
}
}
return buf, nil
}
func init() {
serializers.Add("msgpack",
func() telegraf.Serializer {
return &Serializer{}
},
)
}

View file

@ -0,0 +1,154 @@
package msgpack
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/plugins/serializers"
"github.com/influxdata/telegraf/testutil"
)
func toTelegrafMetric(m Metric) telegraf.Metric {
tm := metric.New(m.Name, m.Tags, m.Fields, m.Time.time)
return tm
}
func TestSerializeMetricInt(t *testing.T) {
m := testutil.TestMetric(int64(90))
s := Serializer{}
var buf []byte
buf, err := s.Serialize(m)
require.NoError(t, err)
m2 := &Metric{}
left, err := m2.UnmarshalMsg(buf)
require.NoError(t, err)
require.Empty(t, left)
testutil.RequireMetricEqual(t, m, toTelegrafMetric(*m2))
}
func TestSerializeMetricString(t *testing.T) {
m := testutil.TestMetric("foobar")
s := Serializer{}
var buf []byte
buf, err := s.Serialize(m)
require.NoError(t, err)
m2 := &Metric{}
left, err := m2.UnmarshalMsg(buf)
require.NoError(t, err)
require.Empty(t, left)
testutil.RequireMetricEqual(t, m, toTelegrafMetric(*m2))
}
func TestSerializeMultiFields(t *testing.T) {
m := testutil.TestMetric(90)
m.AddField("value2", 8559615)
s := Serializer{}
var buf []byte
buf, err := s.Serialize(m)
require.NoError(t, err)
m2 := &Metric{}
left, err := m2.UnmarshalMsg(buf)
require.NoError(t, err)
require.Empty(t, left)
testutil.RequireMetricEqual(t, m, toTelegrafMetric(*m2))
}
func TestSerializeMetricWithEscapes(t *testing.T) {
m := testutil.TestMetric(90)
m.AddField("U,age=Idle", int64(90))
m.AddTag("cpu tag", "cpu0")
s := Serializer{}
var buf []byte
buf, err := s.Serialize(m)
require.NoError(t, err)
m2 := &Metric{}
left, err := m2.UnmarshalMsg(buf)
require.NoError(t, err)
require.Empty(t, left)
testutil.RequireMetricEqual(t, m, toTelegrafMetric(*m2))
}
func TestSerializeMultipleMetric(t *testing.T) {
m := testutil.TestMetric(90)
s := Serializer{}
encoded, err := s.Serialize(m)
require.NoError(t, err)
// Multiple metrics in continuous bytes stream
var buf []byte
buf = append(buf, encoded...)
buf = append(buf, encoded...)
buf = append(buf, encoded...)
buf = append(buf, encoded...)
left := buf
for len(left) > 0 {
decodeM := &Metric{}
left, err = decodeM.UnmarshalMsg(left)
require.NoError(t, err)
testutil.RequireMetricEqual(t, m, toTelegrafMetric(*decodeM))
}
}
func TestSerializeBatch(t *testing.T) {
m := testutil.TestMetric(90)
metrics := []telegraf.Metric{m, m, m, m}
s := Serializer{}
buf, err := s.SerializeBatch(metrics)
require.NoError(t, err)
left := buf
for len(left) > 0 {
decodeM := &Metric{}
left, err = decodeM.UnmarshalMsg(left)
require.NoError(t, err)
testutil.RequireMetricEqual(t, m, toTelegrafMetric(*decodeM))
}
}
func BenchmarkSerialize(b *testing.B) {
s := &Serializer{}
metrics := serializers.BenchmarkMetrics(b)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := s.Serialize(metrics[i%len(metrics)])
require.NoError(b, err)
}
}
func BenchmarkSerializeBatch(b *testing.B) {
s := &Serializer{}
m := serializers.BenchmarkMetrics(b)
metrics := m[:]
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := s.SerializeBatch(metrics)
require.NoError(b, err)
}
}

View file

@ -0,0 +1,135 @@
# ServiceNow Metrics serializer
The ServiceNow Metrics serializer outputs metrics in the
[ServiceNow Operational Intelligence format][ServiceNow-format] or optionally
with the [ServiceNow JSONv2 format][ServiceNow-jsonv2].
It can be used to write to a file using the file output, or to send metrics
to a MID Server with the "Enable REST endpoint" option activated, using the
standard Telegraf HTTP output. If you are using the HTTP output, this
serializer knows how to batch the metrics so you do not end up with an HTTP
POST per metric.
[ServiceNow-format]: https://docs.servicenow.com/bundle/london-it-operations-management/page/product/event-management/reference/mid-POST-metrics.html
[ServiceNow-jsonv2]: https://docs.servicenow.com/bundle/tokyo-application-development/page/integrate/inbound-other-web-services/concept/c_JSONv2WebService.html
An example Operational Intelligence format event looks like:
```json
[
{
"metric_type": "Disk C: % Free Space",
"resource": "C:\\",
"node": "lnux100",
"value": 50,
"timestamp": 1473183012000,
"ci2metric_id": {
"node": "lnux100"
},
"source": "Telegraf"
}
]
```
An example of the JSONv2 format event looks like:
```json
{
"records": [
{
"metric_type": "Disk C: % Free Space",
"resource": "C:\\",
"node": "lnux100",
"value": 50,
"timestamp": 1473183012000,
"ci2metric_id": {
"node": "lnux100"
},
"source": "Telegraf"
}
]
}
```
## Using with the HTTP output
To send this data to a ServiceNow MID Server with the Web Server extension activated, you can use the HTTP output. You need to add some custom headers to manage the MID Web Server authorization; here is a sample config for an HTTP output:
```toml
[[outputs.http]]
## URL is the address to send metrics to
url = "http://<mid server fqdn or ip address>:9082/api/mid/sa/metrics"
## Timeout for HTTP message
# timeout = "5s"
## HTTP method, one of: "POST" or "PUT"
method = "POST"
## HTTP Basic Auth credentials
username = 'evt.integration'
password = 'P@$$w0rd!'
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "nowmetric"
## Format Type
## By default, the serializer returns an array of metrics matching the
## Now Metric Operational Intelligence format or with the option set to 'oi'.
## Optionally, if set to 'jsonv2' the output format will involve the newer
## JSON object based format.
# nowmetric_format = "oi"
## Additional HTTP headers
[outputs.http.headers]
# # Should be set manually to "application/json" for json data_format
Content-Type = "application/json"
Accept = "application/json"
```
Starting with the [London release](https://docs.servicenow.com/bundle/london-it-operations-management/page/product/event-management/task/event-rule-bind-metrics-to-host.html),
you also need to explicitly create an event rule to allow binding of metric events to host CIs.
## Metric Format
The following describes the two values of the `nowmetric_format` option:
The Operational Intelligence format is used along with the
`/api/mid/sa/metrics` API endpoint. The payload requires a JSON array
of metrics. This is the default setting, used when set to `oi`. See
[ServiceNow KB0853084][KB0853084] for more details on this format.
Another option is the use of the [JSONv2 web service][jsonv2]. This service
requires a different, [JSON object based][jsonv2_format] format. This
option is used when set to `jsonv2`.
[KB0853084]: https://support.servicenow.com/kb?id=kb_article_view&sysparm_article=KB0853084
[jsonv2]: https://docs.servicenow.com/bundle/tokyo-application-development/page/integrate/inbound-other-web-services/concept/c_JSONv2WebService.html
[jsonv2_format]: https://docs.servicenow.com/bundle/tokyo-application-development/page/integrate/inbound-other-web-services/concept/c_JSONObjectFormat.html
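As a sketch of the mapping (derived from this serializer's code: the `objectname` tag becomes `resource`, the `host` tag becomes `node` and the `ci2metric_id` entry, each non-string field becomes one event, and the timestamp is converted to epoch milliseconds), the hypothetical input metric
```text
disk,host=lnux100,objectname=C: free_percent=50 1473183012000000000
```
would serialize with the default `oi` format to:
```json
[
  {
    "metric_type": "free_percent",
    "resource": "C:",
    "node": "lnux100",
    "value": 50,
    "timestamp": 1473183012000,
    "ci2metric_id": {
      "node": "lnux100"
    },
    "source": "Telegraf"
  }
]
```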
## Using with the File output
You can use the file output to write the payload to a file.
In this case, just add the following section to your Telegraf config file:
```toml
[[outputs.file]]
## Files to write to, "stdout" is a specially handled file.
files = ["C:/Telegraf/metrics.out"]
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "nowmetric"
```

View file

@ -0,0 +1,137 @@
package nowmetric
import (
"encoding/json"
"fmt"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/serializers"
)
type Serializer struct {
Format string `toml:"nowmetric_format"`
}
type OIMetric struct {
Metric string `json:"metric_type"`
Resource string `json:"resource"`
Node string `json:"node"`
Value interface{} `json:"value"`
Timestamp int64 `json:"timestamp"`
CiMapping map[string]string `json:"ci2metric_id"`
Source string `json:"source"`
}
type OIMetrics []OIMetric
type OIMetricsObj struct {
Records []OIMetric `json:"records"`
}
func (s *Serializer) Init() error {
switch s.Format {
case "":
s.Format = "oi"
case "oi", "jsonv2":
default:
return fmt.Errorf("invalid format %q", s.Format)
}
return nil
}
func (s *Serializer) Serialize(metric telegraf.Metric) (out []byte, err error) {
m := createObject(metric)
if s.Format == "jsonv2" {
obj := OIMetricsObj{Records: m}
return json.Marshal(obj)
}
return json.Marshal(m)
}
func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) (out []byte, err error) {
objects := make([]OIMetric, 0)
for _, metric := range metrics {
objects = append(objects, createObject(metric)...)
}
if s.Format == "jsonv2" {
obj := OIMetricsObj{Records: objects}
return json.Marshal(obj)
}
return json.Marshal(objects)
}
func createObject(metric telegraf.Metric) OIMetrics {
/* ServiceNow Operational Intelligence supports an array of JSON objects.
** The following elements are accepted in the request body:
** metric_type: The name of the metric
** resource: Information about the resource for which metric data is being collected.
In the example below, C:\ is the resource for which metric data is collected
** node: IP, FQDN, name of the CI, or host
** value: Value of the metric
** timestamp: Epoch timestamp of the metric in milliseconds
** ci2metric_id: List of key-value pairs to identify the CI.
** source: Data source monitoring the metric type
*/
var allmetrics OIMetrics //nolint:prealloc // Pre-allocating may change format of marshaled JSON
var oimetric OIMetric
oimetric.Source = "Telegraf"
// Process Tags to extract node & resource name info
for _, tag := range metric.TagList() {
if tag.Key == "" || tag.Value == "" {
continue
}
if tag.Key == "objectname" {
oimetric.Resource = tag.Value
}
if tag.Key == "host" {
oimetric.Node = tag.Value
}
}
// Format timestamp as UNIX epoch in milliseconds
oimetric.Timestamp = metric.Time().UnixNano() / int64(time.Millisecond)
// Loop over the field key-value pairs and build a datapoint for each of them
for _, field := range metric.FieldList() {
if !verifyValue(field.Value) {
// Ignore String
continue
}
if field.Key == "" {
// Ignore Empty Key
continue
}
oimetric.Metric = field.Key
oimetric.Value = field.Value
if oimetric.Node != "" {
oimetric.CiMapping = map[string]string{"node": oimetric.Node}
}
allmetrics = append(allmetrics, oimetric)
}
return allmetrics
}
func verifyValue(v interface{}) bool {
_, ok := v.(string)
return !ok
}
func init() {
serializers.Add("nowmetric",
func() telegraf.Serializer {
return &Serializer{}
},
)
}
