Adding upstream version 1.34.4.
Signed-off-by: Daniel Baumann <daniel@debian.org>
This commit is contained in:
parent
e393c3af3f
commit
4978089aab
4963 changed files with 677545 additions and 0 deletions
70
plugins/processors/parser/README.md
Normal file
70
plugins/processors/parser/README.md
Normal file
|
@ -0,0 +1,70 @@
|
|||
# Parser Processor Plugin
|
||||
|
||||
This plugin parses defined fields or tags containing the specified data format
|
||||
and creates new metrics based on the contents of the field or tag.
|
||||
|
||||
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
|
||||
|
||||
In addition to the plugin-specific configuration settings, plugins support
|
||||
additional global and plugin configuration settings. These settings are used to
|
||||
modify metrics, tags, and field or create aliases and configure ordering, etc.
|
||||
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
|
||||
|
||||
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
|
||||
|
||||
## Configuration
|
||||
|
||||
```toml @sample.conf
|
||||
# Parse a value in a specified field(s)/tag(s) and add the result in a new metric
|
||||
[[processors.parser]]
|
||||
## The name of the fields whose value will be parsed.
|
||||
parse_fields = ["message"]
|
||||
|
||||
## Fields to base64 decode.
|
||||
## These fields do not need to be specified in parse_fields.
|
||||
## Fields specified here will have base64 decode applied to them.
|
||||
# parse_fields_base64 = []
|
||||
|
||||
## The name of the tags whose value will be parsed.
|
||||
# parse_tags = []
|
||||
|
||||
## If true, incoming metrics are not emitted.
|
||||
# drop_original = false
|
||||
|
||||
## Merge Behavior
|
||||
## Only has effect when drop_original is set to false. Possible options
|
||||
## include:
|
||||
## * override: emitted metrics are merged by overriding the original metric
|
||||
## using the newly parsed metrics, but retains the original metric
|
||||
## timestamp.
|
||||
## * override-with-timestamp: the same as "override", but the timestamp is
|
||||
## set based on the new metrics if present.
|
||||
# merge = ""
|
||||
|
||||
## The data format the selected field/tag values are parsed with
|
||||
## Each data format has its own unique set of configuration options, read
|
||||
## more about them here:
|
||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
||||
data_format = "influx"
|
||||
```
|
||||
|
||||
## Example
|
||||
|
||||
```toml
|
||||
[[processors.parser]]
|
||||
parse_fields = ["message"]
|
||||
merge = "override"
|
||||
data_format = "logfmt"
|
||||
```
|
||||
|
||||
### Input
|
||||
|
||||
```text
|
||||
syslog,appname=influxd,facility=daemon,hostname=http://influxdb.example.org\ (influxdb.example.org),severity=info facility_code=3i,message=" ts=2018-08-09T21:01:48.137963Z lvl=info msg=\"Executing query\" log_id=09p7QbOG000 service=query query=\"SHOW DATABASES\"",procid="6629",severity_code=6i,timestamp=1533848508138040000i,version=1i
|
||||
```
|
||||
|
||||
### Output
|
||||
|
||||
```text
|
||||
syslog,appname=influxd,facility=daemon,hostname=http://influxdb.example.org\ (influxdb.example.org),severity=info facility_code=3i,log_id="09p7QbOG000",lvl="info",message=" ts=2018-08-09T21:01:48.137963Z lvl=info msg=\"Executing query\" log_id=09p7QbOG000 service=query query=\"SHOW DATABASES\"",msg="Executing query",procid="6629",query="SHOW DATABASES",service="query",severity_code=6i,timestamp=1533848508138040000i,ts="2018-08-09T21:01:48.137963Z",version=1i
|
||||
```
|
201
plugins/processors/parser/parser.go
Normal file
201
plugins/processors/parser/parser.go
Normal file
|
@ -0,0 +1,201 @@
|
|||
//go:generate ../../../tools/readme_config_includer/generator
|
||||
package parser
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
_ "embed"
|
||||
"encoding/base64"
|
||||
gobin "encoding/binary"
|
||||
"fmt"
|
||||
"slices"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
"github.com/influxdata/telegraf/plugins/processors"
|
||||
)
|
||||
|
||||
//go:embed sample.conf
|
||||
var sampleConfig string
|
||||
|
||||
// Parser is a processor plugin that runs a data-format parser over the
// values of selected fields and/or tags of incoming metrics and emits the
// metrics produced by the parser, optionally merged back into the original.
type Parser struct {
	// DropOriginal suppresses the incoming metric when true; when false the
	// original metric is emitted alongside the parsed ones.
	DropOriginal bool `toml:"drop_original"`
	// Merge selects how parsed metrics are combined with the original:
	// "" (no merge), "override" or "override-with-timestamp".
	// Validated in Init; only effective when DropOriginal is false.
	Merge string `toml:"merge"`
	// ParseFields lists field names whose values are parsed as-is.
	ParseFields []string `toml:"parse_fields"`
	// Base64Fields lists field names that are base64-decoded before parsing.
	// A field must not appear in both ParseFields and Base64Fields.
	Base64Fields []string `toml:"parse_fields_base64"`
	// ParseTags lists tag names whose values are parsed.
	ParseTags []string `toml:"parse_tags"`
	// Log is injected by the Telegraf agent.
	Log telegraf.Logger `toml:"-"`

	// parser is the configured data-format parser, injected via SetParser.
	parser telegraf.Parser
}
|
||||
|
||||
func (p *Parser) Init() error {
|
||||
switch p.Merge {
|
||||
case "", "override", "override-with-timestamp":
|
||||
default:
|
||||
return fmt.Errorf("unrecognized merge value: %s", p.Merge)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SampleConfig returns the embedded sample configuration of the plugin
// (see sample.conf, embedded via go:embed).
func (*Parser) SampleConfig() string {
	return sampleConfig
}
|
||||
|
||||
// SetParser injects the data-format parser configured via "data_format";
// it is called by the Telegraf agent during plugin setup.
func (p *Parser) SetParser(parser telegraf.Parser) {
	p.parser = parser
}
|
||||
|
||||
// Apply processes each incoming metric: the values of the configured fields
// and tags are run through the data-format parser and the resulting metrics
// are emitted. Depending on Merge, the parsed metrics are either emitted as
// separate metrics or folded back into a single metric. If DropOriginal is
// true the original metric is dropped; otherwise it is kept (and, in merge
// mode, serves as the merge base).
func (p *Parser) Apply(metrics ...telegraf.Metric) []telegraf.Metric {
	results := make([]telegraf.Metric, 0, len(metrics))
	for _, metric := range metrics {
		// newMetrics collects the original metric (unless dropped) followed
		// by all metrics produced from parsed fields and tags.
		var newMetrics []telegraf.Metric
		if !p.DropOriginal {
			newMetrics = append(newMetrics, metric)
		} else {
			metric.Drop()
		}

		// parse fields
		for _, field := range metric.FieldList() {
			plain := slices.Contains(p.ParseFields, field.Key)
			b64 := slices.Contains(p.Base64Fields, field.Key)

			// Skip fields that are not selected for parsing at all.
			if !plain && !b64 {
				continue
			}

			// A field listed in both sets is ambiguous; refuse to guess.
			if plain && b64 {
				p.Log.Errorf("field %s is listed in both parse fields and base64 fields; skipping", field.Key)
				continue
			}

			// Non-string field values are serialized to bytes before parsing.
			value, err := toBytes(field.Value)
			if err != nil {
				p.Log.Errorf("could not convert field %s: %v; skipping", field.Key, err)
				continue
			}

			if b64 {
				// Decode base64 in place before handing to the parser.
				decoded := make([]byte, base64.StdEncoding.DecodedLen(len(value)))
				n, err := base64.StdEncoding.Decode(decoded, value)
				if err != nil {
					p.Log.Errorf("could not decode base64 field %s: %v; skipping", field.Key, err)
					continue
				}
				value = decoded[:n]
			}

			fromFieldMetric, err := p.parser.Parse(value)
			if err != nil {
				p.Log.Errorf("could not parse field %s: %v", field.Key, err)
				continue
			}

			for _, m := range fromFieldMetric {
				// The parser gets the parent plugin's name as default
				// measurement name. Thus, in case the parsed metric does not
				// provide a name itself, the parser will return 'parser' as
				// we are in processors.parser. In those cases we want to
				// keep the original metric name.
				if m.Name() == "" || m.Name() == "parser" {
					m.SetName(metric.Name())
				}
			}

			// multiple parsed fields shouldn't create multiple
			// metrics so we'll merge tags/fields down into one
			// prior to returning.
			newMetrics = append(newMetrics, fromFieldMetric...)
		}

		// parse tags
		for _, key := range p.ParseTags {
			if value, ok := metric.GetTag(key); ok {
				fromTagMetric, err := p.parseValue(value)
				if err != nil {
					// NOTE(review): unlike the field path this does not
					// `continue` on error; fromTagMetric is nil here, so the
					// loop and append below are no-ops.
					p.Log.Errorf("could not parse tag %s: %v", key, err)
				}

				for _, m := range fromTagMetric {
					// Same default-name handling as for parsed fields: keep
					// the original metric name when the parser returned its
					// fallback name.
					if m.Name() == "" || m.Name() == "parser" {
						m.SetName(metric.Name())
					}
				}

				newMetrics = append(newMetrics, fromTagMetric...)
			}
		}

		// Nothing survived (original dropped and nothing parsed): emit nothing.
		if len(newMetrics) == 0 {
			continue
		}

		// Merge all produced metrics into the first one (the original metric
		// when DropOriginal is false), or emit them individually.
		if p.Merge == "override" {
			results = append(results, merge(newMetrics[0], newMetrics[1:]))
		} else if p.Merge == "override-with-timestamp" {
			results = append(results, mergeWithTimestamp(newMetrics[0], newMetrics[1:]))
		} else {
			results = append(results, newMetrics...)
		}
	}
	return results
}
|
||||
|
||||
func merge(base telegraf.Metric, metrics []telegraf.Metric) telegraf.Metric {
|
||||
for _, metric := range metrics {
|
||||
for _, field := range metric.FieldList() {
|
||||
base.AddField(field.Key, field.Value)
|
||||
}
|
||||
for _, tag := range metric.TagList() {
|
||||
base.AddTag(tag.Key, tag.Value)
|
||||
}
|
||||
base.SetName(metric.Name())
|
||||
}
|
||||
return base
|
||||
}
|
||||
|
||||
func mergeWithTimestamp(base telegraf.Metric, metrics []telegraf.Metric) telegraf.Metric {
|
||||
for _, metric := range metrics {
|
||||
for _, field := range metric.FieldList() {
|
||||
base.AddField(field.Key, field.Value)
|
||||
}
|
||||
for _, tag := range metric.TagList() {
|
||||
base.AddTag(tag.Key, tag.Value)
|
||||
}
|
||||
base.SetName(metric.Name())
|
||||
if !metric.Time().IsZero() {
|
||||
base.SetTime(metric.Time())
|
||||
}
|
||||
}
|
||||
return base
|
||||
}
|
||||
|
||||
// parseValue runs the configured parser over a raw string value (used for
// tag values, which are always strings) and returns the resulting metrics.
func (p *Parser) parseValue(value string) ([]telegraf.Metric, error) {
	return p.parser.Parse([]byte(value))
}
|
||||
|
||||
func toBytes(value interface{}) ([]byte, error) {
|
||||
if v, ok := value.(string); ok {
|
||||
return []byte(v), nil
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
if err := gobin.Write(&buf, internal.HostEndianness, value); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
// init registers the processor under the name "parser" so it can be
// referenced as [[processors.parser]] in Telegraf configuration files.
func init() {
	processors.Add("parser", func() telegraf.Processor {
		// DropOriginal defaults to false: original metrics pass through.
		return &Parser{DropOriginal: false}
	})
}
|
1080
plugins/processors/parser/parser_test.go
Normal file
1080
plugins/processors/parser/parser_test.go
Normal file
File diff suppressed because it is too large
Load diff
31
plugins/processors/parser/sample.conf
Normal file
31
plugins/processors/parser/sample.conf
Normal file
|
@ -0,0 +1,31 @@
|
|||
# Parse a value in a specified field(s)/tag(s) and add the result in a new metric
|
||||
[[processors.parser]]
|
||||
## The name of the fields whose value will be parsed.
|
||||
parse_fields = ["message"]
|
||||
|
||||
## Fields to base64 decode.
|
||||
## These fields do not need to be specified in parse_fields.
|
||||
## Fields specified here will have base64 decode applied to them.
|
||||
# parse_fields_base64 = []
|
||||
|
||||
## The name of the tags whose value will be parsed.
|
||||
# parse_tags = []
|
||||
|
||||
## If true, incoming metrics are not emitted.
|
||||
# drop_original = false
|
||||
|
||||
## Merge Behavior
|
||||
## Only has effect when drop_original is set to false. Possible options
|
||||
## include:
|
||||
## * override: emitted metrics are merged by overriding the original metric
|
||||
## using the newly parsed metrics, but retains the original metric
|
||||
## timestamp.
|
||||
## * override-with-timestamp: the same as "override", but the timestamp is
|
||||
## set based on the new metrics if present.
|
||||
# merge = ""
|
||||
|
||||
## The data format the selected field/tag values are parsed with
|
||||
## Each data format has its own unique set of configuration options, read
|
||||
## more about them here:
|
||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
||||
data_format = "influx"
|
Loading…
Add table
Add a link
Reference in a new issue