Adding upstream version 1.34.4.

Signed-off-by: Daniel Baumann <daniel@debian.org>

parent e393c3af3f
commit 4978089aab

4963 changed files with 677545 additions and 0 deletions
22  plugins/parsers/prometheus/README.md  Normal file

@@ -0,0 +1,22 @@
# Prometheus Text-Based Format Parser Plugin

There are no additional configuration options for [Prometheus Text-Based
Format][]. The metrics are parsed directly into Telegraf metrics. It is used
internally in [prometheus input](/plugins/inputs/prometheus) or can be used in
[http_listener_v2](/plugins/inputs/http_listener_v2) to simulate Pushgateway.

[Prometheus Text-Based Format]: https://prometheus.io/docs/instrumenting/exposition_formats/#text-based-format

## Configuration

```toml
[[inputs.file]]
  files = ["example"]

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "prometheus"
```
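Beyond the data-format registration shown above, the parser added in this commit can also be driven directly from Go. A minimal, hypothetical sketch (the program and the "example" file name are illustrative, not part of the commit; `Parser`, `MetricVersion`, and `Parse` come from `parser.go` further down in this diff):

```go
package main

import (
	"fmt"
	"os"

	"github.com/influxdata/telegraf/plugins/parsers/prometheus"
)

func main() {
	// Read a file containing Prometheus text exposition format,
	// e.g. the "example" file referenced in the config above.
	data, err := os.ReadFile("example")
	if err != nil {
		panic(err)
	}

	// MetricVersion 2 is the default layout (metric name "prometheus",
	// one field per sample); version 1 keeps the original metric name.
	parser := &prometheus.Parser{MetricVersion: 2}
	metrics, err := parser.Parse(data)
	if err != nil {
		panic(err)
	}

	for _, m := range metrics {
		fmt.Println(m.Name(), m.Tags(), m.Fields(), m.Time())
	}
}
```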
37  plugins/parsers/prometheus/common.go  Normal file

@@ -0,0 +1,37 @@
package prometheus

import (
	dto "github.com/prometheus/client_model/go"

	"github.com/influxdata/telegraf"
)

func mapValueType(mt dto.MetricType) telegraf.ValueType {
	switch mt {
	case dto.MetricType_COUNTER:
		return telegraf.Counter
	case dto.MetricType_GAUGE:
		return telegraf.Gauge
	case dto.MetricType_SUMMARY:
		return telegraf.Summary
	case dto.MetricType_HISTOGRAM:
		return telegraf.Histogram
	default:
		return telegraf.Untyped
	}
}

func getTagsFromLabels(m *dto.Metric, defaultTags map[string]string) map[string]string {
	result := make(map[string]string, len(defaultTags)+len(m.Label))
	for key, value := range defaultTags {
		result[key] = value
	}

	for _, label := range m.Label {
		if v := label.GetValue(); v != "" {
			result[label.GetName()] = v
		}
	}

	return result
}
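A hypothetical in-package test sketching the merge rules of `getTagsFromLabels`: default tags are copied first, labels override matching keys, and labels with empty values are dropped. The test name and values are illustrative only:

```go
package prometheus

import (
	"testing"

	dto "github.com/prometheus/client_model/go"
	"github.com/stretchr/testify/require"
	"google.golang.org/protobuf/proto"
)

func TestGetTagsFromLabelsSketch(t *testing.T) {
	m := &dto.Metric{
		Label: []*dto.LabelPair{
			{Name: proto.String("host"), Value: proto.String("omsk")},
			{Name: proto.String("unused"), Value: proto.String("")}, // empty values are skipped
		},
	}

	// Defaults are applied first, then labels override matching keys.
	tags := getTagsFromLabels(m, map[string]string{"host": "fallback", "dc": "eu"})
	require.Equal(t, map[string]string{"host": "omsk", "dc": "eu"}, tags)
}
```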
85  plugins/parsers/prometheus/metric_v1.go  Normal file

@@ -0,0 +1,85 @@
package prometheus

import (
	"math"
	"strconv"
	"time"

	dto "github.com/prometheus/client_model/go"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/metric"
)

func (p *Parser) extractMetricsV1(prommetrics *dto.MetricFamily) []telegraf.Metric {
	now := time.Now()

	// Convert each prometheus metric to the corresponding telegraf metric.
	// You will get one telegraf metric with one field per prometheus metric
	// for "simple" types like Gauge and Counter but a telegraf metric with
	// multiple fields for "complex" types like Summary or Histogram.
	var metrics []telegraf.Metric
	metricName := prommetrics.GetName()
	metricType := prommetrics.GetType()
	for _, pm := range prommetrics.Metric {
		// Extract the timestamp of the metric if it exists and should
		// not be ignored.
		t := now
		if ts := pm.GetTimestampMs(); !p.IgnoreTimestamp && ts > 0 {
			t = time.UnixMilli(ts)
		}

		// Convert the labels to tags
		tags := getTagsFromLabels(pm, p.DefaultTags)

		// Construct the metrics
		switch metricType {
		case dto.MetricType_SUMMARY:
			summary := pm.GetSummary()

			// Collect the fields
			fields := make(map[string]interface{}, len(summary.Quantile)+2)
			fields["count"] = float64(summary.GetSampleCount())
			fields["sum"] = summary.GetSampleSum()
			for _, q := range summary.Quantile {
				if v := q.GetValue(); !math.IsNaN(v) {
					fname := strconv.FormatFloat(q.GetQuantile(), 'g', -1, 64)
					fields[fname] = v
				}
			}
			metrics = append(metrics, metric.New(metricName, tags, fields, t, telegraf.Summary))
		case dto.MetricType_HISTOGRAM:
			histogram := pm.GetHistogram()

			// Collect the fields
			fields := make(map[string]interface{}, len(histogram.Bucket)+2)
			fields["count"] = float64(pm.GetHistogram().GetSampleCount())
			fields["sum"] = pm.GetHistogram().GetSampleSum()
			for _, b := range histogram.Bucket {
				fname := strconv.FormatFloat(b.GetUpperBound(), 'g', -1, 64)
				fields[fname] = float64(b.GetCumulativeCount())
			}
			metrics = append(metrics, metric.New(metricName, tags, fields, t, telegraf.Histogram))
		default:
			var fname string
			var v float64
			if gauge := pm.GetGauge(); gauge != nil {
				fname = "gauge"
				v = gauge.GetValue()
			} else if counter := pm.GetCounter(); counter != nil {
				fname = "counter"
				v = counter.GetValue()
			} else if untyped := pm.GetUntyped(); untyped != nil {
				fname = "value"
				v = untyped.GetValue()
			}
			if fname != "" && !math.IsNaN(v) {
				fields := map[string]interface{}{fname: v}
				vtype := mapValueType(metricType)
				metrics = append(metrics, metric.New(metricName, tags, fields, t, vtype))
			}
		}
	}

	return metrics
}
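A hypothetical test sketching the version-1 layout for a simple type: the telegraf metric keeps the Prometheus metric name and carries a single field named after the type (the sample value mirrors the `swap_free` line in the protobuf testcase further down); the test name is illustrative:

```go
package prometheus

import (
	"testing"

	dto "github.com/prometheus/client_model/go"
	"github.com/stretchr/testify/require"
	"google.golang.org/protobuf/proto"
)

func TestExtractMetricsV1GaugeSketch(t *testing.T) {
	mf := &dto.MetricFamily{
		Name:   proto.String("swap_free"),
		Type:   dto.MetricType_GAUGE.Enum(),
		Metric: []*dto.Metric{{Gauge: &dto.Gauge{Value: proto.Float64(977911808)}}},
	}

	p := &Parser{}
	metrics := p.extractMetricsV1(mf)

	// One telegraf metric, named after the Prometheus metric, with one "gauge" field.
	require.Len(t, metrics, 1)
	require.Equal(t, "swap_free", metrics[0].Name())
	require.Equal(t, map[string]interface{}{"gauge": float64(977911808)}, metrics[0].Fields())
}
```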
107  plugins/parsers/prometheus/metric_v2.go  Normal file

@@ -0,0 +1,107 @@
package prometheus

import (
	"math"
	"strconv"
	"time"

	dto "github.com/prometheus/client_model/go"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/metric"
)

func (p *Parser) extractMetricsV2(prommetrics *dto.MetricFamily) []telegraf.Metric {
	now := time.Now()

	// Convert each prometheus metric to a corresponding telegraf metric
	// with one field each. The process will filter NaNs in values and skip
	// the corresponding metrics.
	var metrics []telegraf.Metric
	metricName := prommetrics.GetName()
	metricType := prommetrics.GetType()
	for _, pm := range prommetrics.Metric {
		// Extract the timestamp of the metric if it exists and should
		// not be ignored.
		t := now
		if ts := pm.GetTimestampMs(); !p.IgnoreTimestamp && ts > 0 {
			t = time.UnixMilli(ts)
		}

		// Convert the labels to tags
		tags := getTagsFromLabels(pm, p.DefaultTags)

		// Construct the metrics
		switch metricType {
		case dto.MetricType_SUMMARY:
			summary := pm.GetSummary()

			// Add an overall metric containing the number of samples and its sum
			summaryFields := make(map[string]interface{})
			summaryFields[metricName+"_count"] = float64(summary.GetSampleCount())
			summaryFields[metricName+"_sum"] = summary.GetSampleSum()
			metrics = append(metrics, metric.New("prometheus", tags, summaryFields, t, telegraf.Summary))

			// Add one metric per quantile
			for _, q := range summary.Quantile {
				quantileTags := tags
				quantileTags["quantile"] = strconv.FormatFloat(q.GetQuantile(), 'g', -1, 64)
				quantileFields := map[string]interface{}{
					metricName: q.GetValue(),
				}
				m := metric.New("prometheus", quantileTags, quantileFields, t, telegraf.Summary)
				metrics = append(metrics, m)
			}
		case dto.MetricType_HISTOGRAM:
			histogram := pm.GetHistogram()

			// Add an overall metric containing the number of samples and its sum
			histFields := make(map[string]interface{})
			histFields[metricName+"_count"] = float64(histogram.GetSampleCount())
			histFields[metricName+"_sum"] = histogram.GetSampleSum()
			metrics = append(metrics, metric.New("prometheus", tags, histFields, t, telegraf.Histogram))

			// Add one metric per histogram bucket
			var infSeen bool
			for _, b := range histogram.Bucket {
				bucketTags := tags
				bucketTags["le"] = strconv.FormatFloat(b.GetUpperBound(), 'g', -1, 64)
				bucketFields := map[string]interface{}{
					metricName + "_bucket": float64(b.GetCumulativeCount()),
				}
				m := metric.New("prometheus", bucketTags, bucketFields, t, telegraf.Histogram)
				metrics = append(metrics, m)

				// Record if any of the buckets marks an infinite upper bound
				infSeen = infSeen || math.IsInf(b.GetUpperBound(), +1)
			}

			// Infinity bucket is required for proper function of histogram in prometheus
			if !infSeen {
				infTags := tags
				infTags["le"] = "+Inf"
				infFields := map[string]interface{}{
					metricName + "_bucket": float64(histogram.GetSampleCount()),
				}
				m := metric.New("prometheus", infTags, infFields, t, telegraf.Histogram)
				metrics = append(metrics, m)
			}
		default:
			v := math.Inf(1)
			if gauge := pm.GetGauge(); gauge != nil {
				v = gauge.GetValue()
			} else if counter := pm.GetCounter(); counter != nil {
				v = counter.GetValue()
			} else if untyped := pm.GetUntyped(); untyped != nil {
				v = untyped.GetValue()
			}
			if !math.IsNaN(v) {
				fields := map[string]interface{}{metricName: v}
				vtype := mapValueType(metricType)
				metrics = append(metrics, metric.New("prometheus", tags, fields, t, vtype))
			}
		}
	}

	return metrics
}
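The same gauge run through the version-2 path, as a hypothetical sketch: every sample becomes a metric named "prometheus" and the original Prometheus metric name moves into the field key (compare the `expected_v1.out`/`expected_v2.out` pairs in the testcases below):

```go
package prometheus

import (
	"testing"

	dto "github.com/prometheus/client_model/go"
	"github.com/stretchr/testify/require"
	"google.golang.org/protobuf/proto"
)

func TestExtractMetricsV2GaugeSketch(t *testing.T) {
	mf := &dto.MetricFamily{
		Name:   proto.String("swap_free"),
		Type:   dto.MetricType_GAUGE.Enum(),
		Metric: []*dto.Metric{{Gauge: &dto.Gauge{Value: proto.Float64(977911808)}}},
	}

	p := &Parser{}
	metrics := p.extractMetricsV2(mf)

	// The measurement is always "prometheus"; the sample name becomes the field key.
	require.Len(t, metrics, 1)
	require.Equal(t, "prometheus", metrics[0].Name())
	require.Equal(t, map[string]interface{}{"swap_free": float64(977911808)}, metrics[0].Fields())
}
```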
99  plugins/parsers/prometheus/parser.go  Normal file

@@ -0,0 +1,99 @@
package prometheus

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"net/http"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/parsers"
)

func AcceptsContent(header http.Header) bool {
	return expfmt.ResponseFormat(header).FormatType() != expfmt.TypeUnknown
}

type Parser struct {
	IgnoreTimestamp bool              `toml:"prometheus_ignore_timestamp"`
	MetricVersion   int               `toml:"prometheus_metric_version"`
	Header          http.Header       `toml:"-"` // set by the prometheus input
	DefaultTags     map[string]string `toml:"-"`
	Log             telegraf.Logger   `toml:"-"`
}

func (p *Parser) SetDefaultTags(tags map[string]string) {
	p.DefaultTags = tags
}

func (p *Parser) Parse(data []byte) ([]telegraf.Metric, error) {
	// Determine the metric transport-type derived from the response header and
	// create a matching decoder.
	format := expfmt.NewFormat(expfmt.TypeProtoCompact)
	if len(p.Header) > 0 {
		format = expfmt.ResponseFormat(p.Header)
		switch format.FormatType() {
		case expfmt.TypeProtoText:
			// Make sure we have a finishing newline but no trailing one
			data = bytes.TrimPrefix(data, []byte("\n"))
			if !bytes.HasSuffix(data, []byte("\n")) {
				data = append(data, []byte("\n")...)
			}
		case expfmt.TypeUnknown:
			p.Log.Debugf("Unknown format %q... Trying to continue...", p.Header.Get("Content-Type"))
		}
	}
	buf := bytes.NewBuffer(data)
	decoder := expfmt.NewDecoder(buf, format)

	// Decode the input data into prometheus metrics
	var metrics []telegraf.Metric
	for {
		var mf dto.MetricFamily
		if err := decoder.Decode(&mf); err != nil {
			if errors.Is(err, io.EOF) {
				break
			}
			return nil, fmt.Errorf("decoding response failed: %w", err)
		}

		switch p.MetricVersion {
		case 0, 2:
			metrics = append(metrics, p.extractMetricsV2(&mf)...)
		case 1:
			metrics = append(metrics, p.extractMetricsV1(&mf)...)
		default:
			return nil, fmt.Errorf("unknown prometheus metric version %d", p.MetricVersion)
		}
	}
	return metrics, nil
}

func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
	metrics, err := p.Parse([]byte(line))
	if err != nil {
		return nil, err
	}

	if len(metrics) < 1 {
		return nil, errors.New("no metrics in line")
	}

	if len(metrics) > 1 {
		return nil, errors.New("more than one metric in line")
	}

	return metrics[0], nil
}

func init() {
	parsers.Add("prometheus",
		func(string) telegraf.Parser {
			return &Parser{}
		},
	)
}
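A hypothetical caller-side sketch showing how the exported `AcceptsContent` helper and the `Header` field fit together when parsing a scraped HTTP response; the function name, endpoint URL, and overall flow are illustrative, not part of the commit:

```go
package main

import (
	"fmt"
	"io"
	"net/http"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/parsers/prometheus"
)

func parseScrape(resp *http.Response) ([]telegraf.Metric, error) {
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}

	p := &prometheus.Parser{}
	// Only hand the headers to the parser when they advertise a format the
	// expfmt decoder recognises; otherwise keep the default text handling
	// (and avoid the nil-Logger debug path for unknown content types).
	if prometheus.AcceptsContent(resp.Header) {
		p.Header = resp.Header
	}
	return p.Parse(body)
}

func main() {
	resp, err := http.Get("http://localhost:9100/metrics") // e.g. a node_exporter endpoint
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	metrics, err := parseScrape(resp)
	if err != nil {
		panic(err)
	}
	fmt.Println("parsed", len(metrics), "metrics")
}
```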
178  plugins/parsers/prometheus/parser_test.go  Normal file

@@ -0,0 +1,178 @@
package prometheus

import (
	"net/http"
	"os"
	"path/filepath"
	"testing"

	"github.com/google/go-cmp/cmp"
	"github.com/stretchr/testify/require"

	"github.com/influxdata/telegraf/config"
	"github.com/influxdata/telegraf/models"
	"github.com/influxdata/telegraf/testutil"
	test "github.com/influxdata/telegraf/testutil/plugin_input"
)

func TestCases(t *testing.T) {
	// Get all directories in testcases
	folders, err := os.ReadDir("testcases")
	require.NoError(t, err)
	// Make sure testdata contains data
	require.NotEmpty(t, folders)

	for _, f := range folders {
		fname := f.Name()
		testdataPath := filepath.Join("testcases", fname)
		configFilename := filepath.Join(testdataPath, "telegraf.conf")

		// Run tests as metric version 1
		t.Run(fname+"_v1", func(t *testing.T) {
			// Load the configuration
			cfg := config.NewConfig()
			require.NoError(t, cfg.LoadConfig(configFilename))
			require.Len(t, cfg.Inputs, 1)

			// Tune plugin
			plugin := cfg.Inputs[0].Input.(*test.Plugin)
			plugin.Path = testdataPath
			plugin.UseTypeTag = "_type"
			plugin.ExpectedFilename = "expected_v1.out"

			parser := plugin.Parser.(*models.RunningParser).Parser.(*Parser)
			parser.MetricVersion = 1
			if raw, found := plugin.AdditionalParams["headers"]; found {
				headers, ok := raw.(map[string]interface{})
				require.Truef(t, ok, "unknown header type %T", raw)
				parser.Header = make(http.Header)
				for k, rv := range headers {
					v, ok := rv.(string)
					require.Truef(t, ok, "unknown header value type %T for %q", raw, k)
					parser.Header.Add(k, v)
				}
			}
			require.NoError(t, plugin.Init())

			// Gather data and check errors
			var acc testutil.Accumulator
			err := plugin.Gather(&acc)
			switch len(plugin.ExpectedErrors) {
			case 0:
				require.NoError(t, err)
			case 1:
				require.ErrorContains(t, err, plugin.ExpectedErrors[0])
			default:
				require.Contains(t, plugin.ExpectedErrors, err.Error())
			}

			// Determine checking options
			options := []cmp.Option{
				testutil.SortMetrics(),
			}
			if plugin.ShouldIgnoreTimestamp || parser.IgnoreTimestamp {
				options = append(options, testutil.IgnoreTime())
			}

			// Check the resulting metrics
			actual := acc.GetTelegrafMetrics()
			testutil.RequireMetricsEqual(t, plugin.Expected, actual, options...)

			// Special checks
			if parser.IgnoreTimestamp {
				t.Log("testing ignore-timestamp case")
				for i, m := range actual {
					expected := plugin.Expected[i]
					require.Greaterf(t, m.Time(), expected.Time(), "metric time not after prometheus value in %d", i)
				}
			}
		})

		// Run tests as metric version 2
		t.Run(fname+"_v2", func(t *testing.T) {
			// Load the configuration
			cfg := config.NewConfig()
			require.NoError(t, cfg.LoadConfig(configFilename))
			require.Len(t, cfg.Inputs, 1)

			// Tune plugin
			plugin := cfg.Inputs[0].Input.(*test.Plugin)
			plugin.Path = testdataPath
			plugin.UseTypeTag = "_type"
			plugin.ExpectedFilename = "expected_v2.out"

			parser := plugin.Parser.(*models.RunningParser).Parser.(*Parser)
			parser.MetricVersion = 2
			if raw, found := plugin.AdditionalParams["headers"]; found {
				headers, ok := raw.(map[string]interface{})
				require.Truef(t, ok, "unknown header type %T", raw)
				parser.Header = make(http.Header)
				for k, rv := range headers {
					v, ok := rv.(string)
					require.Truef(t, ok, "unknown header value type %T for %q", raw, k)
					parser.Header.Add(k, v)
				}
			}
			require.NoError(t, plugin.Init())

			// Gather data and check errors
			var acc testutil.Accumulator
			err := plugin.Gather(&acc)
			switch len(plugin.ExpectedErrors) {
			case 0:
				require.NoError(t, err)
			case 1:
				require.ErrorContains(t, err, plugin.ExpectedErrors[0])
			default:
				require.Contains(t, plugin.ExpectedErrors, err.Error())
			}

			// Determine checking options
			options := []cmp.Option{
				testutil.SortMetrics(),
			}
			if plugin.ShouldIgnoreTimestamp || parser.IgnoreTimestamp {
				options = append(options, testutil.IgnoreTime())
			}

			// Check the resulting metrics
			actual := acc.GetTelegrafMetrics()
			testutil.RequireMetricsEqual(t, plugin.Expected, actual, options...)

			// Special checks
			if parser.IgnoreTimestamp {
				t.Log("testing ignore-timestamp case")
				for i, m := range actual {
					expected := plugin.Expected[i]
					require.Greaterf(t, m.Time(), expected.Time(), "metric time not after prometheus value in %d", i)
				}
			}
		})
	}
}

func BenchmarkParsingMetricVersion1(b *testing.B) {
	plugin := &Parser{MetricVersion: 1}

	benchmarkData, err := os.ReadFile(filepath.FromSlash("testcases/benchmark/input.txt"))
	require.NoError(b, err)
	require.NotEmpty(b, benchmarkData)

	for n := 0; n < b.N; n++ {
		//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
		plugin.Parse(benchmarkData)
	}
}

func BenchmarkParsingMetricVersion2(b *testing.B) {
	plugin := &Parser{MetricVersion: 2}

	benchmarkData, err := os.ReadFile(filepath.FromSlash("testcases/benchmark/input.txt"))
	require.NoError(b, err)
	require.NotEmpty(b, benchmarkData)

	for n := 0; n < b.N; n++ {
		//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
		plugin.Parse(benchmarkData)
	}
}
@@ -0,0 +1,2 @@
benchmark_a,_type=gauge,source=myhost,tags_platform=python,tags_sdkver=3.11.5 gauge=5 1653643420000000000
benchmark_b,_type=gauge,source=myhost,tags_platform=python,tags_sdkver=3.11.4 gauge=4 1653643420000000000

@@ -0,0 +1,2 @@
prometheus,_type=gauge,source=myhost,tags_platform=python,tags_sdkver=3.11.5 benchmark_a=5 1653643420000000000
prometheus,_type=gauge,source=myhost,tags_platform=python,tags_sdkver=3.11.4 benchmark_b=4 1653643420000000000

7  plugins/parsers/prometheus/testcases/benchmark/input.txt  Normal file

@@ -0,0 +1,7 @@
# HELP benchmark_a Test metric for benchmarking
# TYPE benchmark_a gauge
benchmark_a{source="myhost",tags_platform="python",tags_sdkver="3.11.5"} 5 1653643420000

# HELP benchmark_b Test metric for benchmarking
# TYPE benchmark_b gauge
benchmark_b{source="myhost",tags_platform="python",tags_sdkver="3.11.4"} 4 1653643420000

@@ -0,0 +1,4 @@
[[inputs.test]]
  files = ["input.txt"]
  data_format = "prometheus"
@@ -0,0 +1 @@
cadvisor_version_info,_type=gauge,dockerVersion=1.8.2,kernelVersion=3.10.0-229.20.1.el7.x86_64,osVersion=CentOS\ Linux\ 7\ (Core),defaultTag=defaultTagValue gauge=1

@@ -0,0 +1 @@
prometheus,_type=gauge,dockerVersion=1.8.2,kernelVersion=3.10.0-229.20.1.el7.x86_64,osVersion=CentOS\ Linux\ 7\ (Core),defaultTag=defaultTagValue cadvisor_version_info=1

@@ -0,0 +1,3 @@
# HELP cadvisor_version_info A metric with a constant '1' value labeled by kernel version, OS version, docker version, cadvisor version & cadvisor revision.
# TYPE cadvisor_version_info gauge
cadvisor_version_info{cadvisorRevision="",cadvisorVersion="",dockerVersion="1.8.2",kernelVersion="3.10.0-229.20.1.el7.x86_64",osVersion="CentOS Linux 7 (Core)"} 1

@@ -0,0 +1,7 @@
[[inputs.test]]
  files = ["input.txt"]
  data_format = "prometheus"

  [inputs.test.default_tag_defs]
    defaultTag = "defaultTagValue"
    dockerVersion = "to_be_overridden"
@@ -0,0 +1 @@
apiserver_request_latencies,_type=histogram,resource=bindings,verb=POST 125000=1994,1e+06=2005,250000=1997,2e+06=2012,4e+06=2017,500000=2000,8e+06=2024,count=2025,sum=102726334

@@ -0,0 +1,9 @@
prometheus,_type=histogram,resource=bindings,verb=POST apiserver_request_latencies_count=2025,apiserver_request_latencies_sum=102726334
prometheus,_type=histogram,le=125000,resource=bindings,verb=POST apiserver_request_latencies_bucket=1994
prometheus,_type=histogram,le=250000,resource=bindings,verb=POST apiserver_request_latencies_bucket=1997
prometheus,_type=histogram,le=500000,resource=bindings,verb=POST apiserver_request_latencies_bucket=2000
prometheus,_type=histogram,le=1e+06,resource=bindings,verb=POST apiserver_request_latencies_bucket=2005
prometheus,_type=histogram,le=2e+06,resource=bindings,verb=POST apiserver_request_latencies_bucket=2012
prometheus,_type=histogram,le=4e+06,resource=bindings,verb=POST apiserver_request_latencies_bucket=2017
prometheus,_type=histogram,le=8e+06,resource=bindings,verb=POST apiserver_request_latencies_bucket=2024
prometheus,_type=histogram,le=+Inf,resource=bindings,verb=POST apiserver_request_latencies_bucket=2025

Binary file not shown.

@@ -0,0 +1,6 @@
[[inputs.test]]
  files = ["input.bin"]
  data_format = "prometheus"

  [inputs.test.additional_params]
    headers = {Content-Type = "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited"}
@@ -0,0 +1 @@
test_counter,_type=counter,label=test counter=1 1601830800000000000

@@ -0,0 +1 @@
prometheus,_type=counter,label=test test_counter=1 1601830800000000000

@@ -0,0 +1,2 @@
# TYPE test_counter counter
test_counter{label="test"} 1 1601830800000

@@ -0,0 +1,4 @@
[[inputs.test]]
  files = ["input.txt"]
  data_format = "prometheus"
  prometheus_ignore_timestamp = true

@@ -0,0 +1 @@
test_counter,_type=counter,label=test counter=1 1601830800000000000

@@ -0,0 +1 @@
prometheus,_type=counter,label=test test_counter=1 1601830800000000000

@@ -0,0 +1,2 @@
# TYPE test_counter counter
test_counter{label="test"} 1 1601830800000

@@ -0,0 +1,4 @@
[[inputs.test]]
  files = ["input.txt"]
  data_format = "prometheus"
@@ -0,0 +1,6 @@
swap_free,_type=gauge,host=omsk gauge=977911808
swap_in,_type=counter,host=omsk counter=2031616
swap_out,_type=counter,host=omsk counter=15790080
swap_total,_type=gauge,host=omsk gauge=993185792
swap_used,_type=gauge,host=omsk gauge=15273984
swap_used_percent,_type=gauge,host=omsk gauge=1.5378778193395661

@@ -0,0 +1,6 @@
prometheus,_type=gauge,host=omsk swap_used_percent=1.5378778193395661
prometheus,_type=gauge,host=omsk swap_free=977911808
prometheus,_type=counter,host=omsk swap_in=2031616
prometheus,_type=counter,host=omsk swap_out=15790080
prometheus,_type=gauge,host=omsk swap_total=993185792
prometheus,_type=gauge,host=omsk swap_used=15273984

BIN  plugins/parsers/prometheus/testcases/protobuf/input.bin  Normal file
Binary file not shown.

@@ -0,0 +1,6 @@
[[inputs.test]]
  files = ["input.bin"]
  data_format = "prometheus"

  [inputs.test.additional_params]
    headers = {Content-Type = "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited"}
@@ -0,0 +1 @@
get_token_fail_count,_type=counter counter=0

@@ -0,0 +1 @@
prometheus,_type=counter get_token_fail_count=0

@@ -0,0 +1,3 @@
# HELP get_token_fail_count Counter of failed Token() requests to the alternate token source
# TYPE get_token_fail_count counter
get_token_fail_count 0

@@ -0,0 +1,4 @@
[[inputs.test]]
  files = ["input.txt"]
  data_format = "prometheus"
@@ -0,0 +1 @@
cadvisor_version_info,_type=gauge,dockerVersion=1.8.2,kernelVersion=3.10.0-229.20.1.el7.x86_64,osVersion=CentOS\ Linux\ 7\ (Core) gauge=1

@@ -0,0 +1 @@
prometheus,_type=gauge,dockerVersion=1.8.2,kernelVersion=3.10.0-229.20.1.el7.x86_64,osVersion=CentOS\ Linux\ 7\ (Core) cadvisor_version_info=1

@@ -0,0 +1,3 @@
# HELP cadvisor_version_info A metric with a constant '1' value labeled by kernel version, OS version, docker version, cadvisor version & cadvisor revision.
# TYPE cadvisor_version_info gauge
cadvisor_version_info{cadvisorRevision="",cadvisorVersion="",dockerVersion="1.8.2",kernelVersion="3.10.0-229.20.1.el7.x86_64",osVersion="CentOS Linux 7 (Core)"} 1

@@ -0,0 +1,4 @@
[[inputs.test]]
  files = ["input.txt"]
  data_format = "prometheus"
@@ -0,0 +1 @@
apiserver_request_latencies,_type=histogram,resource=bindings,verb=POST +Inf=2025,125000=1994,1e+06=2005,250000=1997,2e+06=2012,4e+06=2017,500000=2000,8e+06=2024,count=2025,sum=102726334

@@ -0,0 +1,9 @@
prometheus,_type=histogram,resource=bindings,verb=POST apiserver_request_latencies_count=2025,apiserver_request_latencies_sum=102726334
prometheus,_type=histogram,le=125000,resource=bindings,verb=POST apiserver_request_latencies_bucket=1994
prometheus,_type=histogram,le=250000,resource=bindings,verb=POST apiserver_request_latencies_bucket=1997
prometheus,_type=histogram,le=500000,resource=bindings,verb=POST apiserver_request_latencies_bucket=2000
prometheus,_type=histogram,le=1e+06,resource=bindings,verb=POST apiserver_request_latencies_bucket=2005
prometheus,_type=histogram,le=2e+06,resource=bindings,verb=POST apiserver_request_latencies_bucket=2012
prometheus,_type=histogram,le=4e+06,resource=bindings,verb=POST apiserver_request_latencies_bucket=2017
prometheus,_type=histogram,le=8e+06,resource=bindings,verb=POST apiserver_request_latencies_bucket=2024
prometheus,_type=histogram,le=+Inf,resource=bindings,verb=POST apiserver_request_latencies_bucket=2025

@@ -0,0 +1,12 @@
# HELP apiserver_request_latencies Response latency distribution in microseconds for each verb, resource and client.
# TYPE apiserver_request_latencies histogram
apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="125000"} 1994
apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="250000"} 1997
apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="500000"} 2000
apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="1e+06"} 2005
apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="2e+06"} 2012
apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="4e+06"} 2017
apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="8e+06"} 2024
apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="+Inf"} 2025
apiserver_request_latencies_sum{resource="bindings",verb="POST"} 1.02726334e+08
apiserver_request_latencies_count{resource="bindings",verb="POST"} 2025

@@ -0,0 +1,4 @@
[[inputs.test]]
  files = ["input.txt"]
  data_format = "prometheus"
@@ -0,0 +1 @@
http_request_duration_microseconds,_type=summary,handler=prometheus 0.5=552048.506,0.9=5876804.288,0.99=5876804.288,count=9,sum=18909097.205

@@ -0,0 +1,4 @@
prometheus,_type=summary,handler=prometheus http_request_duration_microseconds_count=9,http_request_duration_microseconds_sum=18909097.205
prometheus,_type=summary,handler=prometheus,quantile=0.5 http_request_duration_microseconds=552048.506
prometheus,_type=summary,handler=prometheus,quantile=0.9 http_request_duration_microseconds=5876804.288
prometheus,_type=summary,handler=prometheus,quantile=0.99 http_request_duration_microseconds=5876804.288

@@ -0,0 +1,7 @@
# HELP http_request_duration_microseconds The HTTP request latencies in microseconds.
# TYPE http_request_duration_microseconds summary
http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 552048.506
http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 5.876804288e+06
http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 5.876804288e+06
http_request_duration_microseconds_sum{handler="prometheus"} 1.8909097205e+07
http_request_duration_microseconds_count{handler="prometheus"} 9

@@ -0,0 +1,4 @@
[[inputs.test]]
  files = ["input.txt"]
  data_format = "prometheus"