1
0
Fork 0

Adding upstream version 1.34.4.

Signed-off-by: Daniel Baumann <daniel@debian.org>
This commit is contained in:
Daniel Baumann 2025-05-24 07:26:29 +02:00
parent e393c3af3f
commit 4978089aab
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
4963 changed files with 677545 additions and 0 deletions

View file

@ -0,0 +1,96 @@
# Prometheus
The `prometheus` data format converts metrics into the Prometheus text
exposition format. When used with the `prometheus` input, the input should
use the `metric_version = 2` option in order to properly round trip metrics.
**Warning**: When generating histogram and summary types, output may
not be correct if the metric spans multiple batches. This issue can be
somewhat, but not fully, mitigated by using outputs that support writing in
"batch format". When using histogram and summary types, it is recommended to
use only the `prometheus_client` output. Histogram and Summary types
also update their expiration time based on the most recently received data.
If incoming metrics stop updating specific buckets or quantiles but continue
reporting others, every bucket/quantile will continue to exist.
## Configuration
```toml
[[outputs.file]]
files = ["stdout"]
use_batch_format = true
## Include the metric timestamp on each sample.
prometheus_export_timestamp = false
## Sort prometheus metric families and metric samples. Useful for
## debugging.
prometheus_sort_metrics = false
## Output string fields as metric labels; when false string fields are
## discarded.
prometheus_string_as_label = false
## Encode metrics without HELP metadata. This helps reduce the payload
## size.
prometheus_compact_encoding = false
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "prometheus"
## Specify the metric type explicitly.
## This overrides the metric-type of the Telegraf metric. Globbing is allowed.
[outputs.file.prometheus_metric_types]
counter = []
gauge = []
```
### Metrics
A Prometheus metric is created for each integer, float, boolean or unsigned
field. Boolean values are converted to *1.0* for true and *0.0* for false.
The Prometheus metric names are produced by joining the measurement name with
the field key. In the special case where the measurement name is `prometheus`
it is not included in the final metric name.
Prometheus labels are produced for each tag.
**Note:** String fields are ignored and do not produce Prometheus metrics.
## Example
### Example Input
```text
cpu,cpu=cpu0 time_guest=8022.6,time_system=26145.98,time_user=92512.89 1574317740000000000
cpu,cpu=cpu1 time_guest=8097.88,time_system=25223.35,time_user=96519.58 1574317740000000000
cpu,cpu=cpu2 time_guest=7386.28,time_system=24870.37,time_user=95631.59 1574317740000000000
cpu,cpu=cpu3 time_guest=7434.19,time_system=24843.71,time_user=93753.88 1574317740000000000
```
### Example Output
```text
# HELP cpu_time_guest Telegraf collected metric
# TYPE cpu_time_guest counter
cpu_time_guest{cpu="cpu0"} 9582.54
cpu_time_guest{cpu="cpu1"} 9660.88
cpu_time_guest{cpu="cpu2"} 8946.45
cpu_time_guest{cpu="cpu3"} 9002.31
# HELP cpu_time_system Telegraf collected metric
# TYPE cpu_time_system counter
cpu_time_system{cpu="cpu0"} 28675.47
cpu_time_system{cpu="cpu1"} 27779.34
cpu_time_system{cpu="cpu2"} 27406.18
cpu_time_system{cpu="cpu3"} 27404.97
# HELP cpu_time_user Telegraf collected metric
# TYPE cpu_time_user counter
cpu_time_user{cpu="cpu0"} 99551.84
cpu_time_user{cpu="cpu1"} 103468.52
cpu_time_user{cpu="cpu2"} 102591.45
cpu_time_user{cpu="cpu3"} 100717.05
```

View file

@ -0,0 +1,488 @@
package prometheus
import (
"hash/fnv"
"sort"
"strconv"
"strings"
"time"
dto "github.com/prometheus/client_model/go"
"google.golang.org/protobuf/proto"
"github.com/influxdata/telegraf"
)
// helpString is the HELP text attached to every exported metric family;
// Telegraf metrics carry no per-metric help information of their own.
const helpString = "Telegraf collected metric"

// TimeFunc returns a timestamp; it allows a time source (typically
// time.Now) to be supplied by the caller.
type TimeFunc func() time.Time
// metricFamily identifies a Prometheus metric family by its sanitized name
// and Telegraf value type.
type metricFamily struct {
    Name string
    Type telegraf.ValueType
}

// Metric is a single Prometheus sample within a family. Exactly one of
// Scaler, Histogram or Summary is non-nil, depending on the family type.
type Metric struct {
    Labels    []labelPair
    Time      time.Time // timestamp of the underlying Telegraf metric
    AddTime   time.Time // when the sample was added; drives expiration
    Scaler    *scaler
    Histogram *histogram
    Summary   *summary
}

// labelPair is one Prometheus label name/value pair.
type labelPair struct {
    Name  string
    Value string
}

// scaler holds the value of a counter, gauge or untyped sample.
type scaler struct {
    Value float64
}

// bucket is one histogram bucket: an upper bound and its cumulative count.
type bucket struct {
    Bound float64
    Count uint64
}

// quantile is one summary quantile and its observed value.
type quantile struct {
    Quantile float64
    Value    float64
}

// histogram accumulates the buckets, total count and sum of a Prometheus
// histogram that may arrive spread over several Telegraf metrics.
type histogram struct {
    Buckets []bucket
    Count   uint64
    Sum     float64
}
// merge records bucket b in the histogram. When a bucket with the same
// upper bound already exists its count is overwritten; otherwise b is
// appended as a new bucket.
func (h *histogram) merge(b bucket) {
    for idx := range h.Buckets {
        if h.Buckets[idx].Bound != b.Bound {
            continue
        }
        h.Buckets[idx].Count = b.Count
        return
    }
    h.Buckets = append(h.Buckets, b)
}
// summary accumulates the quantiles, total count and sum of a Prometheus
// summary that may arrive spread over several Telegraf metrics.
type summary struct {
    Quantiles []quantile
    Count     uint64
    Sum       float64
}
// merge records quantile q in the summary. When the quantile already
// exists its value is overwritten; otherwise q is appended.
func (s *summary) merge(q quantile) {
    for idx := range s.Quantiles {
        if s.Quantiles[idx].Quantile != q.Quantile {
            continue
        }
        s.Quantiles[idx].Value = q.Value
        return
    }
    s.Quantiles = append(s.Quantiles, q)
}
type metricKey uint64
// makeMetricKey hashes the ordered label set into a stable 64-bit key.
// Name/value strings are separated by NUL bytes so adjacent strings cannot
// collide under concatenation.
func makeMetricKey(labels []labelPair) metricKey {
    hasher := fnv.New64a()
    for _, lp := range labels {
        hasher.Write([]byte(lp.Name))
        hasher.Write([]byte{0})
        hasher.Write([]byte(lp.Value))
        hasher.Write([]byte{0})
    }
    return metricKey(hasher.Sum64())
}
// entry groups all samples belonging to one metric family, indexed by the
// hash of their label sets.
type entry struct {
    Family  metricFamily
    Metrics map[metricKey]*Metric
}

// Collection accumulates Telegraf metrics into Prometheus metric families
// and renders them as client_model protobufs. It has no internal
// synchronization.
type Collection struct {
    Entries map[metricFamily]entry
    config  FormatConfig
}
// NewCollection creates an empty Collection using the given format options.
func NewCollection(config FormatConfig) *Collection {
    return &Collection{
        Entries: make(map[metricFamily]entry),
        config:  config,
    }
}
// hasLabel reports whether a label with the given name is already present.
func hasLabel(name string, labels []labelPair) bool {
    for i := range labels {
        if labels[i].Name == name {
            return true
        }
    }
    return false
}
// createLabels converts the metric's tags — and, when StringAsLabel is
// enabled, its string-valued fields — into Prometheus labels. Tag/field
// names that cannot be sanitized are silently dropped. The "le" and
// "quantile" tags are excluded for histogram/summary metrics since they are
// encoded structurally rather than as labels.
func (c *Collection) createLabels(metric telegraf.Metric) []labelPair {
    labels := make([]labelPair, 0, len(metric.TagList()))
    for _, tag := range metric.TagList() {
        // Ignore special tags for histogram and summary types.
        switch metric.Type() {
        case telegraf.Histogram:
            if tag.Key == "le" {
                continue
            }
        case telegraf.Summary:
            if tag.Key == "quantile" {
                continue
            }
        }

        name, ok := SanitizeLabelName(tag.Key)
        if !ok {
            continue
        }

        labels = append(labels, labelPair{Name: name, Value: tag.Value})
    }

    if !c.config.StringAsLabel {
        return labels
    }

    addedFieldLabel := false
    for _, field := range metric.FieldList() {
        // Only string-valued fields become labels.
        value, ok := field.Value.(string)
        if !ok {
            continue
        }

        name, ok := SanitizeLabelName(field.Key)
        if !ok {
            continue
        }

        // If there is a tag with the same name as the string field, discard
        // the field and use the tag instead.
        if hasLabel(name, labels) {
            continue
        }

        labels = append(labels, labelPair{Name: name, Value: value})
        addedFieldLabel = true
    }

    // Field labels may break the name ordering, so re-sort only when any
    // were appended.
    if addedFieldLabel {
        sort.Slice(labels, func(i, j int) bool {
            return labels[i].Name < labels[j].Name
        })
    }

    return labels
}
// Add merges a Telegraf metric into the collection at time now. Each field
// becomes (part of) a sample in a metric family derived from the
// measurement and field name. Counter/gauge/untyped fields replace any
// existing sample; histogram and summary fields are merged component-wise
// (buckets/quantiles, _sum, _count) into the existing sample for the same
// label set. Fields whose names or values cannot be converted are skipped.
func (c *Collection) Add(metric telegraf.Metric, now time.Time) {
    labels := c.createLabels(metric)
    for _, field := range metric.FieldList() {
        metricName := MetricName(metric.Name(), field.Key, metric.Type())
        metricName, ok := SanitizeMetricName(metricName)
        if !ok {
            continue
        }

        // Explicit type overrides from configuration take precedence over
        // the metric's own type.
        metricType := c.config.TypeMappings.DetermineType(metricName, metric)

        family := metricFamily{
            Name: metricName,
            Type: metricType,
        }

        singleEntry, ok := c.Entries[family]
        if !ok {
            singleEntry = entry{
                Family:  family,
                Metrics: make(map[metricKey]*Metric),
            }
            c.Entries[family] = singleEntry
        }

        metricKey := makeMetricKey(labels)

        m, ok := singleEntry.Metrics[metricKey]
        if ok {
            // A batch of metrics can contain multiple values for a single
            // Prometheus sample. If this metric is older than the existing
            // sample then we can skip over it.
            if metric.Time().Before(m.Time) {
                continue
            }
        }

        switch metric.Type() {
        case telegraf.Counter:
            fallthrough
        case telegraf.Gauge:
            fallthrough
        case telegraf.Untyped:
            value, ok := SampleValue(field.Value)
            if !ok {
                continue
            }

            // Scalar samples are replaced wholesale by newer data.
            m = &Metric{
                Labels:  labels,
                Time:    metric.Time(),
                AddTime: now,
                Scaler:  &scaler{Value: value},
            }

            singleEntry.Metrics[metricKey] = m
        case telegraf.Histogram:
            if m == nil {
                m = &Metric{
                    Labels:    labels,
                    Time:      metric.Time(),
                    AddTime:   now,
                    Histogram: &histogram{},
                }
            } else {
                // Refresh the timestamps so partial updates keep the whole
                // sample alive.
                m.Time = metric.Time()
                m.AddTime = now
            }
            // Dispatch on the conventional Prometheus field suffix.
            switch {
            case strings.HasSuffix(field.Key, "_bucket"):
                // The bucket's upper bound comes from the "le" tag.
                le, ok := metric.GetTag("le")
                if !ok {
                    continue
                }
                bound, err := strconv.ParseFloat(le, 64)
                if err != nil {
                    continue
                }

                count, ok := SampleCount(field.Value)
                if !ok {
                    continue
                }

                m.Histogram.merge(bucket{
                    Bound: bound,
                    Count: count,
                })
            case strings.HasSuffix(field.Key, "_sum"):
                sum, ok := SampleSum(field.Value)
                if !ok {
                    continue
                }

                m.Histogram.Sum = sum
            case strings.HasSuffix(field.Key, "_count"):
                count, ok := SampleCount(field.Value)
                if !ok {
                    continue
                }

                m.Histogram.Count = count
            default:
                continue
            }

            singleEntry.Metrics[metricKey] = m
        case telegraf.Summary:
            if m == nil {
                m = &Metric{
                    Labels:  labels,
                    Time:    metric.Time(),
                    AddTime: now,
                    Summary: &summary{},
                }
            } else {
                m.Time = metric.Time()
                m.AddTime = now
            }
            switch {
            case strings.HasSuffix(field.Key, "_sum"):
                sum, ok := SampleSum(field.Value)
                if !ok {
                    continue
                }

                m.Summary.Sum = sum
            case strings.HasSuffix(field.Key, "_count"):
                count, ok := SampleCount(field.Value)
                if !ok {
                    continue
                }

                m.Summary.Count = count
            default:
                // Any other field is a quantile value keyed by the
                // "quantile" tag.
                quantileTag, ok := metric.GetTag("quantile")
                if !ok {
                    continue
                }
                singleQuantile, err := strconv.ParseFloat(quantileTag, 64)
                if err != nil {
                    continue
                }

                value, ok := SampleValue(field.Value)
                if !ok {
                    continue
                }

                m.Summary.merge(quantile{
                    Quantile: singleQuantile,
                    Value:    value,
                })
            }

            singleEntry.Metrics[metricKey] = m
        }
    }
}
// Expire removes samples whose AddTime is older than now-age, and removes a
// family entirely once its last sample is gone. Deleting map entries while
// ranging over the same maps is well-defined in Go.
func (c *Collection) Expire(now time.Time, age time.Duration) {
    expireTime := now.Add(-age)
    for _, entry := range c.Entries {
        for key, metric := range entry.Metrics {
            if metric.AddTime.Before(expireTime) {
                delete(entry.Metrics, key)
                // Drop the whole family when its last sample expires.
                if len(entry.Metrics) == 0 {
                    delete(c.Entries, entry.Family)
                }
            }
        }
    }
}
// GetEntries returns the collected metric families, sorted by name and then
// type when SortMetrics is enabled.
func (c *Collection) GetEntries() []entry {
    result := make([]entry, 0, len(c.Entries))
    for _, e := range c.Entries {
        result = append(result, e)
    }
    if c.config.SortMetrics {
        sort.Slice(result, func(i, j int) bool {
            a, b := result[i].Family, result[j].Family
            if a.Name == b.Name {
                return a.Type < b.Type
            }
            return a.Name < b.Name
        })
    }
    return result
}
// GetMetrics returns the samples of a family. When SortMetrics is enabled
// they are ordered by label-set length, then lexicographically by label
// names and values.
func (c *Collection) GetMetrics(e entry) []*Metric {
    list := make([]*Metric, 0, len(e.Metrics))
    for _, m := range e.Metrics {
        list = append(list, m)
    }
    if c.config.SortMetrics {
        sort.Slice(list, func(i, j int) bool {
            left, right := list[i].Labels, list[j].Labels
            if len(left) != len(right) {
                return len(left) < len(right)
            }
            for k := range left {
                if left[k].Name != right[k].Name {
                    return left[k].Name < right[k].Name
                }
                if left[k].Value != right[k].Value {
                    return left[k].Value < right[k].Value
                }
            }
            return false
        })
    }
    return list
}
// GetProto renders the collection as client_model MetricFamily protobufs,
// honoring the CompactEncoding, ExportTimestamp and SortMetrics options.
// Families that end up with no samples are omitted from the result.
func (c *Collection) GetProto() []*dto.MetricFamily {
    result := make([]*dto.MetricFamily, 0, len(c.Entries))

    for _, entry := range c.GetEntries() {
        mf := &dto.MetricFamily{
            Name: proto.String(entry.Family.Name),
            Type: MetricType(entry.Family.Type),
        }
        // HELP text is optional; omit it when compact encoding is requested.
        if !c.config.CompactEncoding {
            mf.Help = proto.String(helpString)
        }

        for _, metric := range c.GetMetrics(entry) {
            l := make([]*dto.LabelPair, 0, len(metric.Labels))
            for _, label := range metric.Labels {
                l = append(l, &dto.LabelPair{
                    Name:  proto.String(label.Name),
                    Value: proto.String(label.Value),
                })
            }

            m := &dto.Metric{
                Label: l,
            }

            if c.config.ExportTimestamp {
                // Prometheus timestamps are milliseconds since the epoch.
                m.TimestampMs = proto.Int64(metric.Time.UnixNano() / int64(time.Millisecond))
            }

            // Populate exactly one value field according to the family type.
            switch entry.Family.Type {
            case telegraf.Gauge:
                m.Gauge = &dto.Gauge{Value: proto.Float64(metric.Scaler.Value)}
            case telegraf.Counter:
                m.Counter = &dto.Counter{Value: proto.Float64(metric.Scaler.Value)}
            case telegraf.Untyped:
                m.Untyped = &dto.Untyped{Value: proto.Float64(metric.Scaler.Value)}
            case telegraf.Histogram:
                buckets := make([]*dto.Bucket, 0, len(metric.Histogram.Buckets))
                for _, bucket := range metric.Histogram.Buckets {
                    buckets = append(buckets, &dto.Bucket{
                        UpperBound:      proto.Float64(bucket.Bound),
                        CumulativeCount: proto.Uint64(bucket.Count),
                    })
                }

                m.Histogram = &dto.Histogram{
                    Bucket:      buckets,
                    SampleCount: proto.Uint64(metric.Histogram.Count),
                    SampleSum:   proto.Float64(metric.Histogram.Sum),
                }
            case telegraf.Summary:
                quantiles := make([]*dto.Quantile, 0, len(metric.Summary.Quantiles))
                for _, quantile := range metric.Summary.Quantiles {
                    quantiles = append(quantiles, &dto.Quantile{
                        Quantile: proto.Float64(quantile.Quantile),
                        Value:    proto.Float64(quantile.Value),
                    })
                }

                m.Summary = &dto.Summary{
                    Quantile:    quantiles,
                    SampleCount: proto.Uint64(metric.Summary.Count),
                    SampleSum:   proto.Float64(metric.Summary.Sum),
                }
            default:
                panic("unknown telegraf.ValueType")
            }

            mf.Metric = append(mf.Metric, m)
        }

        if len(mf.Metric) != 0 {
            result = append(result, mf)
        }
    }

    return result
}

View file

@ -0,0 +1,845 @@
package prometheus
import (
"math"
"testing"
"time"
dto "github.com/prometheus/client_model/go"
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/proto"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
)
// Input pairs a metric with the wall-clock time at which it is added to the
// collection, so tests can control expiration behavior independently of the
// metric's own timestamp.
type Input struct {
    metric  telegraf.Metric
    addtime time.Time
}
// TestCollectionExpire verifies Collection.Add followed by Expire: which
// samples and families survive given each input's add time, the configured
// expiry age, and the simulated current time.
func TestCollectionExpire(t *testing.T) {
    tests := []struct {
        name     string
        now      time.Time
        age      time.Duration
        input    []Input
        expected []*dto.MetricFamily
    }{
        {
            name: "not expired",
            now:  time.Unix(1, 0),
            age:  10 * time.Second,
            input: []Input{
                {
                    metric: testutil.MustMetric(
                        "cpu",
                        map[string]string{},
                        map[string]interface{}{
                            "time_idle": 42.0,
                        },
                        time.Unix(0, 0),
                    ),
                    addtime: time.Unix(0, 0),
                },
            },
            expected: []*dto.MetricFamily{
                {
                    Name: proto.String("cpu_time_idle"),
                    Help: proto.String(helpString),
                    Type: dto.MetricType_UNTYPED.Enum(),
                    Metric: []*dto.Metric{
                        {
                            Label:   make([]*dto.LabelPair, 0),
                            Untyped: &dto.Untyped{Value: proto.Float64(42.0)},
                        },
                    },
                },
            },
        },
        {
            name: "update metric expiration",
            now:  time.Unix(20, 0),
            age:  10 * time.Second,
            input: []Input{
                {
                    metric: testutil.MustMetric(
                        "cpu",
                        map[string]string{},
                        map[string]interface{}{
                            "time_idle": 42.0,
                        },
                        time.Unix(0, 0),
                    ),
                    addtime: time.Unix(0, 0),
                },
                {
                    metric: testutil.MustMetric(
                        "cpu",
                        map[string]string{},
                        map[string]interface{}{
                            "time_idle": 43.0,
                        },
                        time.Unix(12, 0),
                    ),
                    addtime: time.Unix(12, 0),
                },
            },
            expected: []*dto.MetricFamily{
                {
                    Name: proto.String("cpu_time_idle"),
                    Help: proto.String(helpString),
                    Type: dto.MetricType_UNTYPED.Enum(),
                    Metric: []*dto.Metric{
                        {
                            Label:   make([]*dto.LabelPair, 0),
                            Untyped: &dto.Untyped{Value: proto.Float64(43.0)},
                        },
                    },
                },
            },
        },
        {
            // An older sample must not overwrite a newer one.
            name: "update metric expiration descending order",
            now:  time.Unix(20, 0),
            age:  10 * time.Second,
            input: []Input{
                {
                    metric: testutil.MustMetric(
                        "cpu",
                        map[string]string{},
                        map[string]interface{}{
                            "time_idle": 42.0,
                        },
                        time.Unix(12, 0),
                    ),
                    addtime: time.Unix(12, 0),
                }, {
                    metric: testutil.MustMetric(
                        "cpu",
                        map[string]string{},
                        map[string]interface{}{
                            "time_idle": 43.0,
                        },
                        time.Unix(0, 0),
                    ),
                    addtime: time.Unix(0, 0),
                },
            },
            expected: []*dto.MetricFamily{
                {
                    Name: proto.String("cpu_time_idle"),
                    Help: proto.String(helpString),
                    Type: dto.MetricType_UNTYPED.Enum(),
                    Metric: []*dto.Metric{
                        {
                            Label:   make([]*dto.LabelPair, 0),
                            Untyped: &dto.Untyped{Value: proto.Float64(42.0)},
                        },
                    },
                },
            },
        },
        {
            name: "expired single metric in metric family",
            now:  time.Unix(20, 0),
            age:  10 * time.Second,
            input: []Input{
                {
                    metric: testutil.MustMetric(
                        "cpu",
                        map[string]string{},
                        map[string]interface{}{
                            "time_idle": 42.0,
                        },
                        time.Unix(0, 0),
                    ),
                    addtime: time.Unix(0, 0),
                },
            },
            expected: make([]*dto.MetricFamily, 0),
        },
        {
            name: "expired one metric in metric family",
            now:  time.Unix(20, 0),
            age:  10 * time.Second,
            input: []Input{
                {
                    metric: testutil.MustMetric(
                        "cpu",
                        map[string]string{},
                        map[string]interface{}{
                            "time_idle": 42.0,
                        },
                        time.Unix(0, 0),
                    ),
                    addtime: time.Unix(0, 0),
                }, {
                    metric: testutil.MustMetric(
                        "cpu",
                        map[string]string{},
                        map[string]interface{}{
                            "time_guest": 42.0,
                        },
                        time.Unix(15, 0),
                    ),
                    addtime: time.Unix(15, 0),
                },
            },
            expected: []*dto.MetricFamily{
                {
                    Name: proto.String("cpu_time_guest"),
                    Help: proto.String(helpString),
                    Type: dto.MetricType_UNTYPED.Enum(),
                    Metric: []*dto.Metric{
                        {
                            Label:   make([]*dto.LabelPair, 0),
                            Untyped: &dto.Untyped{Value: proto.Float64(42.0)},
                        },
                    },
                },
            },
        },
        {
            name: "histogram bucket updates",
            now:  time.Unix(0, 0),
            age:  10 * time.Second,
            input: []Input{
                {
                    metric: testutil.MustMetric(
                        "prometheus",
                        map[string]string{},
                        map[string]interface{}{
                            "http_request_duration_seconds_sum":   10.0,
                            "http_request_duration_seconds_count": 2,
                        },
                        time.Unix(0, 0),
                        telegraf.Histogram,
                    ),
                    addtime: time.Unix(0, 0),
                }, {
                    metric: testutil.MustMetric(
                        "prometheus",
                        map[string]string{"le": "0.05"},
                        map[string]interface{}{
                            "http_request_duration_seconds_bucket": 1.0,
                        },
                        time.Unix(0, 0),
                        telegraf.Histogram,
                    ),
                    addtime: time.Unix(0, 0),
                }, {
                    metric: testutil.MustMetric(
                        "prometheus",
                        map[string]string{"le": "+Inf"},
                        map[string]interface{}{
                            "http_request_duration_seconds_bucket": 1.0,
                        },
                        time.Unix(0, 0),
                        telegraf.Histogram,
                    ),
                    addtime: time.Unix(0, 0),
                }, {
                    // Next interval
                    metric: testutil.MustMetric(
                        "prometheus",
                        map[string]string{},
                        map[string]interface{}{
                            "http_request_duration_seconds_sum":   20.0,
                            "http_request_duration_seconds_count": 4,
                        },
                        time.Unix(0, 0),
                        telegraf.Histogram,
                    ),
                    addtime: time.Unix(0, 0),
                }, {
                    metric: testutil.MustMetric(
                        "prometheus",
                        map[string]string{"le": "0.05"},
                        map[string]interface{}{
                            "http_request_duration_seconds_bucket": 2.0,
                        },
                        time.Unix(0, 0),
                        telegraf.Histogram,
                    ),
                    addtime: time.Unix(0, 0),
                }, {
                    metric: testutil.MustMetric(
                        "prometheus",
                        map[string]string{"le": "+Inf"},
                        map[string]interface{}{
                            "http_request_duration_seconds_bucket": 2.0,
                        },
                        time.Unix(0, 0),
                        telegraf.Histogram,
                    ),
                    addtime: time.Unix(0, 0),
                },
            },
            expected: []*dto.MetricFamily{
                {
                    Name: proto.String("http_request_duration_seconds"),
                    Help: proto.String(helpString),
                    Type: dto.MetricType_HISTOGRAM.Enum(),
                    Metric: []*dto.Metric{
                        {
                            Label: make([]*dto.LabelPair, 0),
                            Histogram: &dto.Histogram{
                                SampleCount: proto.Uint64(4),
                                SampleSum:   proto.Float64(20.0),
                                Bucket: []*dto.Bucket{
                                    {
                                        UpperBound:      proto.Float64(0.05),
                                        CumulativeCount: proto.Uint64(2),
                                    },
                                    {
                                        UpperBound:      proto.Float64(math.Inf(1)),
                                        CumulativeCount: proto.Uint64(2),
                                    },
                                },
                            },
                        },
                    },
                },
            },
        },
        {
            name: "entire histogram expires",
            now:  time.Unix(20, 0),
            age:  10 * time.Second,
            input: []Input{
                {
                    metric: testutil.MustMetric(
                        "prometheus",
                        map[string]string{},
                        map[string]interface{}{
                            "http_request_duration_seconds_sum":   10.0,
                            "http_request_duration_seconds_count": 2,
                        },
                        time.Unix(0, 0),
                        telegraf.Histogram,
                    ),
                    addtime: time.Unix(0, 0),
                }, {
                    metric: testutil.MustMetric(
                        "prometheus",
                        map[string]string{"le": "0.05"},
                        map[string]interface{}{
                            "http_request_duration_seconds_bucket": 1.0,
                        },
                        time.Unix(0, 0),
                        telegraf.Histogram,
                    ),
                    addtime: time.Unix(0, 0),
                }, {
                    metric: testutil.MustMetric(
                        "prometheus",
                        map[string]string{"le": "+Inf"},
                        map[string]interface{}{
                            "http_request_duration_seconds_bucket": 1.0,
                        },
                        time.Unix(0, 0),
                        telegraf.Histogram,
                    ),
                    addtime: time.Unix(0, 0),
                },
            },
            expected: make([]*dto.MetricFamily, 0),
        },
        {
            name: "histogram does not expire because of addtime from bucket",
            now:  time.Unix(20, 0),
            age:  10 * time.Second,
            input: []Input{
                {
                    metric: testutil.MustMetric(
                        "prometheus",
                        map[string]string{"le": "+Inf"},
                        map[string]interface{}{
                            "http_request_duration_seconds_bucket": 1.0,
                        },
                        time.Unix(0, 0),
                        telegraf.Histogram,
                    ),
                    addtime: time.Unix(0, 0),
                }, {
                    metric: testutil.MustMetric(
                        "prometheus",
                        map[string]string{},
                        map[string]interface{}{
                            "http_request_duration_seconds_sum":   10.0,
                            "http_request_duration_seconds_count": 2,
                        },
                        time.Unix(0, 0),
                        telegraf.Histogram,
                    ),
                    addtime: time.Unix(0, 0),
                }, {
                    metric: testutil.MustMetric(
                        "prometheus",
                        map[string]string{"le": "0.05"},
                        map[string]interface{}{
                            "http_request_duration_seconds_bucket": 1.0,
                        },
                        time.Unix(0, 0),
                        telegraf.Histogram,
                    ),
                    addtime: time.Unix(15, 0), // More recent addtime causes entire metric to stay valid
                },
            },
            expected: []*dto.MetricFamily{
                {
                    Name: proto.String("http_request_duration_seconds"),
                    Help: proto.String(helpString),
                    Type: dto.MetricType_HISTOGRAM.Enum(),
                    Metric: []*dto.Metric{
                        {
                            Label: make([]*dto.LabelPair, 0),
                            Histogram: &dto.Histogram{
                                SampleCount: proto.Uint64(2),
                                SampleSum:   proto.Float64(10.0),
                                Bucket: []*dto.Bucket{
                                    {
                                        UpperBound:      proto.Float64(math.Inf(1)),
                                        CumulativeCount: proto.Uint64(1),
                                    },
                                    {
                                        UpperBound:      proto.Float64(0.05),
                                        CumulativeCount: proto.Uint64(1),
                                    },
                                },
                            },
                        },
                    },
                },
            },
        },
        {
            name: "summary quantile updates",
            now:  time.Unix(0, 0),
            age:  10 * time.Second,
            input: []Input{
                {
                    metric: testutil.MustMetric(
                        "prometheus",
                        map[string]string{},
                        map[string]interface{}{
                            "rpc_duration_seconds_sum":   1.0,
                            "rpc_duration_seconds_count": 1,
                        },
                        time.Unix(0, 0),
                        telegraf.Summary,
                    ),
                    addtime: time.Unix(0, 0),
                }, {
                    metric: testutil.MustMetric(
                        "prometheus",
                        map[string]string{"quantile": "0.01"},
                        map[string]interface{}{
                            "rpc_duration_seconds": 1.0,
                        },
                        time.Unix(0, 0),
                        telegraf.Summary,
                    ),
                    addtime: time.Unix(0, 0),
                }, {
                    // Updated Summary
                    metric: testutil.MustMetric(
                        "prometheus",
                        map[string]string{},
                        map[string]interface{}{
                            "rpc_duration_seconds_sum":   2.0,
                            "rpc_duration_seconds_count": 2,
                        },
                        time.Unix(0, 0),
                        telegraf.Summary,
                    ),
                    addtime: time.Unix(0, 0),
                }, {
                    metric: testutil.MustMetric(
                        "prometheus",
                        map[string]string{"quantile": "0.01"},
                        map[string]interface{}{
                            "rpc_duration_seconds": 2.0,
                        },
                        time.Unix(0, 0),
                        telegraf.Summary,
                    ),
                    addtime: time.Unix(0, 0),
                },
            },
            expected: []*dto.MetricFamily{
                {
                    Name: proto.String("rpc_duration_seconds"),
                    Help: proto.String(helpString),
                    Type: dto.MetricType_SUMMARY.Enum(),
                    Metric: []*dto.Metric{
                        {
                            Label: make([]*dto.LabelPair, 0),
                            Summary: &dto.Summary{
                                SampleCount: proto.Uint64(2),
                                SampleSum:   proto.Float64(2.0),
                                Quantile: []*dto.Quantile{
                                    {
                                        Quantile: proto.Float64(0.01),
                                        Value:    proto.Float64(2),
                                    },
                                },
                            },
                        },
                    },
                },
            },
        },
        {
            name: "Entire summary expires",
            now:  time.Unix(20, 0),
            age:  10 * time.Second,
            input: []Input{
                {
                    metric: testutil.MustMetric(
                        "prometheus",
                        map[string]string{},
                        map[string]interface{}{
                            "rpc_duration_seconds_sum":   1.0,
                            "rpc_duration_seconds_count": 1,
                        },
                        time.Unix(0, 0),
                        telegraf.Summary,
                    ),
                    addtime: time.Unix(0, 0),
                }, {
                    metric: testutil.MustMetric(
                        "prometheus",
                        map[string]string{"quantile": "0.01"},
                        map[string]interface{}{
                            "rpc_duration_seconds": 1.0,
                        },
                        time.Unix(0, 0),
                        telegraf.Summary,
                    ),
                    addtime: time.Unix(0, 0),
                },
            },
            expected: make([]*dto.MetricFamily, 0),
        },
        {
            name: "summary does not expire because of quantile addtime",
            now:  time.Unix(20, 0),
            age:  10 * time.Second,
            input: []Input{
                {
                    metric: testutil.MustMetric(
                        "prometheus",
                        map[string]string{},
                        map[string]interface{}{
                            "rpc_duration_seconds_sum":   1.0,
                            "rpc_duration_seconds_count": 1,
                        },
                        time.Unix(0, 0),
                        telegraf.Summary,
                    ),
                    addtime: time.Unix(0, 0),
                }, {
                    metric: testutil.MustMetric(
                        "prometheus",
                        map[string]string{"quantile": "0.5"},
                        map[string]interface{}{
                            "rpc_duration_seconds": 10.0,
                        },
                        time.Unix(0, 0),
                        telegraf.Summary,
                    ),
                    addtime: time.Unix(0, 0),
                }, {
                    metric: testutil.MustMetric(
                        "prometheus",
                        map[string]string{"quantile": "0.01"},
                        map[string]interface{}{
                            "rpc_duration_seconds": 1.0,
                        },
                        time.Unix(0, 0),
                        telegraf.Summary,
                    ),
                    addtime: time.Unix(15, 0), // Recent addtime keeps entire metric around
                },
            },
            expected: []*dto.MetricFamily{
                {
                    Name: proto.String("rpc_duration_seconds"),
                    Help: proto.String(helpString),
                    Type: dto.MetricType_SUMMARY.Enum(),
                    Metric: []*dto.Metric{
                        {
                            Label: make([]*dto.LabelPair, 0),
                            Summary: &dto.Summary{
                                SampleSum:   proto.Float64(1),
                                SampleCount: proto.Uint64(1),
                                Quantile: []*dto.Quantile{
                                    {
                                        Quantile: proto.Float64(0.5),
                                        Value:    proto.Float64(10),
                                    },
                                    {
                                        Quantile: proto.Float64(0.01),
                                        Value:    proto.Float64(1),
                                    },
                                },
                            },
                        },
                    },
                },
            },
        },
        {
            // Expiration is based on addtime, not the metric timestamp.
            name: "expire based on add time",
            now:  time.Unix(20, 0),
            age:  10 * time.Second,
            input: []Input{
                {
                    metric: testutil.MustMetric(
                        "cpu",
                        map[string]string{},
                        map[string]interface{}{
                            "time_idle": 42.0,
                        },
                        time.Unix(0, 0),
                    ),
                    addtime: time.Unix(15, 0),
                },
            },
            expected: []*dto.MetricFamily{
                {
                    Name: proto.String("cpu_time_idle"),
                    Help: proto.String(helpString),
                    Type: dto.MetricType_UNTYPED.Enum(),
                    Metric: []*dto.Metric{
                        {
                            Label:   make([]*dto.LabelPair, 0),
                            Untyped: &dto.Untyped{Value: proto.Float64(42.0)},
                        },
                    },
                },
            },
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            // Add all inputs, expire, then compare the rendered protobufs.
            c := NewCollection(FormatConfig{})
            for _, item := range tt.input {
                c.Add(item.metric, item.addtime)
            }
            c.Expire(tt.now, tt.age)

            actual := c.GetProto()

            require.Equal(t, tt.expected, actual)
        })
    }
}
// TestExportTimestamps verifies that, with ExportTimestamp enabled, the
// rendered samples carry the millisecond timestamp of the most recently
// merged Telegraf metric.
func TestExportTimestamps(t *testing.T) {
    tests := []struct {
        name     string
        now      time.Time
        age      time.Duration
        input    []Input
        expected []*dto.MetricFamily
    }{
        {
            name: "histogram bucket updates",
            now:  time.Unix(23, 0),
            age:  10 * time.Second,
            input: []Input{
                {
                    metric: testutil.MustMetric(
                        "prometheus",
                        map[string]string{},
                        map[string]interface{}{
                            "http_request_duration_seconds_sum":   10.0,
                            "http_request_duration_seconds_count": 2,
                        },
                        time.Unix(15, 0),
                        telegraf.Histogram,
                    ),
                    addtime: time.Unix(23, 0),
                }, {
                    metric: testutil.MustMetric(
                        "prometheus",
                        map[string]string{"le": "0.05"},
                        map[string]interface{}{
                            "http_request_duration_seconds_bucket": 1.0,
                        },
                        time.Unix(15, 0),
                        telegraf.Histogram,
                    ),
                    addtime: time.Unix(23, 0),
                }, {
                    metric: testutil.MustMetric(
                        "prometheus",
                        map[string]string{"le": "+Inf"},
                        map[string]interface{}{
                            "http_request_duration_seconds_bucket": 1.0,
                        },
                        time.Unix(15, 0),
                        telegraf.Histogram,
                    ),
                    addtime: time.Unix(23, 0),
                }, {
                    // Next interval
                    metric: testutil.MustMetric(
                        "prometheus",
                        map[string]string{},
                        map[string]interface{}{
                            "http_request_duration_seconds_sum":   20.0,
                            "http_request_duration_seconds_count": 4,
                        },
                        time.Unix(20, 0), // Updated timestamp
                        telegraf.Histogram,
                    ),
                    addtime: time.Unix(23, 0),
                }, {
                    metric: testutil.MustMetric(
                        "prometheus",
                        map[string]string{"le": "0.05"},
                        map[string]interface{}{
                            "http_request_duration_seconds_bucket": 2.0,
                        },
                        time.Unix(20, 0), // Updated timestamp
                        telegraf.Histogram,
                    ),
                    addtime: time.Unix(23, 0),
                }, {
                    metric: testutil.MustMetric(
                        "prometheus",
                        map[string]string{"le": "+Inf"},
                        map[string]interface{}{
                            "http_request_duration_seconds_bucket": 2.0,
                        },
                        time.Unix(20, 0), // Updated timestamp
                        telegraf.Histogram,
                    ),
                    addtime: time.Unix(23, 0),
                },
            },
            expected: []*dto.MetricFamily{
                {
                    Name: proto.String("http_request_duration_seconds"),
                    Help: proto.String(helpString),
                    Type: dto.MetricType_HISTOGRAM.Enum(),
                    Metric: []*dto.Metric{
                        {
                            Label:       make([]*dto.LabelPair, 0),
                            TimestampMs: proto.Int64(time.Unix(20, 0).UnixNano() / int64(time.Millisecond)),
                            Histogram: &dto.Histogram{
                                SampleCount: proto.Uint64(4),
                                SampleSum:   proto.Float64(20.0),
                                Bucket: []*dto.Bucket{
                                    {
                                        UpperBound:      proto.Float64(0.05),
                                        CumulativeCount: proto.Uint64(2),
                                    },
                                    {
                                        UpperBound:      proto.Float64(math.Inf(1)),
                                        CumulativeCount: proto.Uint64(2),
                                    },
                                },
                            },
                        },
                    },
                },
            },
        },
        {
            name: "summary quantile updates",
            now:  time.Unix(23, 0),
            age:  10 * time.Second,
            input: []Input{
                {
                    metric: testutil.MustMetric(
                        "prometheus",
                        map[string]string{},
                        map[string]interface{}{
                            "rpc_duration_seconds_sum":   1.0,
                            "rpc_duration_seconds_count": 1,
                        },
                        time.Unix(15, 0),
                        telegraf.Summary,
                    ),
                    addtime: time.Unix(23, 0),
                }, {
                    metric: testutil.MustMetric(
                        "prometheus",
                        map[string]string{"quantile": "0.01"},
                        map[string]interface{}{
                            "rpc_duration_seconds": 1.0,
                        },
                        time.Unix(15, 0),
                        telegraf.Summary,
                    ),
                    addtime: time.Unix(23, 0),
                }, {
                    // Updated Summary
                    metric: testutil.MustMetric(
                        "prometheus",
                        map[string]string{},
                        map[string]interface{}{
                            "rpc_duration_seconds_sum":   2.0,
                            "rpc_duration_seconds_count": 2,
                        },
                        time.Unix(20, 0), // Updated timestamp
                        telegraf.Summary,
                    ),
                    addtime: time.Unix(23, 0),
                }, {
                    metric: testutil.MustMetric(
                        "prometheus",
                        map[string]string{"quantile": "0.01"},
                        map[string]interface{}{
                            "rpc_duration_seconds": 2.0,
                        },
                        time.Unix(20, 0), // Updated timestamp
                        telegraf.Summary,
                    ),
                    addtime: time.Unix(23, 0),
                },
            },
            expected: []*dto.MetricFamily{
                {
                    Name: proto.String("rpc_duration_seconds"),
                    Help: proto.String(helpString),
                    Type: dto.MetricType_SUMMARY.Enum(),
                    Metric: []*dto.Metric{
                        {
                            Label:       make([]*dto.LabelPair, 0),
                            TimestampMs: proto.Int64(time.Unix(20, 0).UnixNano() / int64(time.Millisecond)),
                            Summary: &dto.Summary{
                                SampleCount: proto.Uint64(2),
                                SampleSum:   proto.Float64(2.0),
                                Quantile: []*dto.Quantile{
                                    {
                                        Quantile: proto.Float64(0.01),
                                        Value:    proto.Float64(2),
                                    },
                                },
                            },
                        },
                    },
                },
            },
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            // Same flow as TestCollectionExpire, but with timestamps enabled.
            c := NewCollection(FormatConfig{ExportTimestamp: true})
            for _, item := range tt.input {
                c.Add(item.metric, item.addtime)
            }
            c.Expire(tt.now, tt.age)

            actual := c.GetProto()

            require.Equal(t, tt.expected, actual)
        })
    }
}

View file

@ -0,0 +1,197 @@
package prometheus
import (
"strings"
"unicode"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/model"
"github.com/influxdata/telegraf"
)
// Table holds the set of runes allowed at the first position of a name and
// the set allowed in the remaining positions.
type Table struct {
    First *unicode.RangeTable
    Rest  *unicode.RangeTable
}
// MetricNameTable describes the runes valid in a Prometheus metric name:
// the first rune must be one of [:A-Za-z_]; later runes may also be digits.
var MetricNameTable = Table{
    First: &unicode.RangeTable{
        R16: []unicode.Range16{
            {0x003A, 0x003A, 1}, // :
            {0x0041, 0x005A, 1}, // A-Z
            {0x005F, 0x005F, 1}, // _
            {0x0061, 0x007A, 1}, // a-z
        },
        LatinOffset: 4,
    },
    Rest: &unicode.RangeTable{
        R16: []unicode.Range16{
            {0x0030, 0x003A, 1}, // 0-:
            {0x0041, 0x005A, 1}, // A-Z
            {0x005F, 0x005F, 1}, // _
            {0x0061, 0x007A, 1}, // a-z
        },
        LatinOffset: 4,
    },
}

// LabelNameTable describes the runes valid in a Prometheus label name:
// the first rune must be one of [A-Za-z_]; later runes may also be digits.
// Unlike metric names, colons are not allowed.
var LabelNameTable = Table{
    First: &unicode.RangeTable{
        R16: []unicode.Range16{
            {0x0041, 0x005A, 1}, // A-Z
            {0x005F, 0x005F, 1}, // _
            {0x0061, 0x007A, 1}, // a-z
        },
        LatinOffset: 3,
    },
    Rest: &unicode.RangeTable{
        R16: []unicode.Range16{
            {0x0030, 0x0039, 1}, // 0-9
            {0x0041, 0x005A, 1}, // A-Z
            {0x005F, 0x005F, 1}, // _
            {0x0061, 0x007A, 1}, // a-z
        },
        LatinOffset: 4,
    },
}
// sanitize rewrites name so every rune is valid according to table. Invalid
// runes after the first are replaced with an underscore; an invalid leading
// rune is dropped. Leading and trailing underscores are then trimmed; if
// nothing remains, the second return value is false.
func sanitize(name string, table Table) (string, bool) {
    var out strings.Builder

    for i, r := range name {
        if i == 0 {
            if unicode.In(r, table.First) {
                out.WriteRune(r)
            }
            continue
        }
        if unicode.In(r, table.Rest) {
            out.WriteRune(r)
        } else {
            out.WriteByte('_')
        }
    }

    cleaned := strings.Trim(out.String(), "_")
    if cleaned == "" {
        return "", false
    }
    return cleaned, true
}
// SanitizeMetricName checks if the name is a valid Prometheus metric name.
// If not, it attempts to replace invalid runes with an underscore to create
// a valid name.
func SanitizeMetricName(name string) (string, bool) {
    // Fast path: already valid under the legacy naming rules.
    if model.IsValidLegacyMetricName(name) {
        return name, true
    }
    return sanitize(name, MetricNameTable)
}
// SanitizeLabelName checks if the name is a valid Prometheus label name. If
// not, it attempts to replace invalid runes with an underscore to create a
// valid name.
func SanitizeLabelName(name string) (string, bool) {
    // Fast path: already valid under the legacy naming rules.
    if model.LabelName(name).IsValidLegacy() {
        return name, true
    }
    return sanitize(name, LabelNameTable)
}
// MetricName builds the Prometheus metric name from a measurement and field
// key. For histogram and summary values the conventional component suffix
// (_bucket, _sum or _count) is first stripped from the field key. A
// measurement named "prometheus" acts as a pass-through and is not prefixed
// onto the field key.
func MetricName(measurement, fieldKey string, valueType telegraf.ValueType) string {
    if valueType == telegraf.Histogram || valueType == telegraf.Summary {
        for _, suffix := range []string{"_bucket", "_sum", "_count"} {
            if strings.HasSuffix(fieldKey, suffix) {
                fieldKey = strings.TrimSuffix(fieldKey, suffix)
                break
            }
        }
    }

    if measurement == "prometheus" {
        return fieldKey
    }
    return measurement + "_" + fieldKey
}
// MetricType maps a Telegraf value type onto the corresponding client_model
// protobuf metric type. It panics on an unrecognized value type, which
// would indicate a programming error.
func MetricType(valueType telegraf.ValueType) *dto.MetricType {
    switch valueType {
    case telegraf.Counter:
        return dto.MetricType_COUNTER.Enum()
    case telegraf.Gauge:
        return dto.MetricType_GAUGE.Enum()
    case telegraf.Summary:
        return dto.MetricType_SUMMARY.Enum()
    case telegraf.Untyped:
        return dto.MetricType_UNTYPED.Enum()
    case telegraf.Histogram:
        return dto.MetricType_HISTOGRAM.Enum()
    default:
        panic("unknown telegraf.ValueType")
    }
}
// SampleValue converts a field value into a float64 sample value. Booleans
// map to 1.0 (true) and 0.0 (false); strings and any other types are
// rejected with ok=false.
func SampleValue(value interface{}) (float64, bool) {
    switch v := value.(type) {
    case bool:
        if v {
            return 1.0, true
        }
        return 0.0, true
    case float64:
        return v, true
    case int64:
        return float64(v), true
    case uint64:
        return float64(v), true
    }
    return 0, false
}
// SampleCount converts a field value into an unsigned count suitable for a
// metric family of the Histogram or Summary type. Negative numbers and
// non-numeric types are rejected with ok=false.
func SampleCount(value interface{}) (uint64, bool) {
    switch n := value.(type) {
    case uint64:
        return n, true
    case int64:
        if n < 0 {
            return 0, false
        }
        return uint64(n), true
    case float64:
        if n < 0 {
            return 0, false
        }
        return uint64(n), true
    default:
        return 0, false
    }
}
// SampleSum converts a field value into a sum suitable for a metric family
// of the Histogram or Summary type. Unsupported types yield (0, false).
func SampleSum(value interface{}) (float64, bool) {
	var sum float64
	switch v := value.(type) {
	case float64:
		sum = v
	case int64:
		sum = float64(v)
	case uint64:
		sum = float64(v)
	default:
		return 0, false
	}
	return sum, true
}

View file

@ -0,0 +1,95 @@
package prometheus
import (
"bytes"
"fmt"
"time"
"github.com/prometheus/common/expfmt"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/filter"
"github.com/influxdata/telegraf/plugins/serializers"
)
// MetricTypes lets the user explicitly override the Prometheus metric type
// of selected metrics via glob patterns.
type MetricTypes struct {
	Counter []string `toml:"counter"` // glob patterns of metrics forced to counter
	Gauge   []string `toml:"gauge"`   // glob patterns of metrics forced to gauge

	// Compiled forms of the patterns above; populated by Init.
	filterCounter filter.Filter
	filterGauge   filter.Filter
}
// Init compiles the configured glob patterns into filters so that
// DetermineType can match metric names cheaply. It must run before the
// first call to DetermineType.
func (mt *MetricTypes) Init() error {
	var err error
	if mt.filterCounter, err = filter.Compile(mt.Counter); err != nil {
		return fmt.Errorf("creating counter filter failed: %w", err)
	}
	if mt.filterGauge, err = filter.Compile(mt.Gauge); err != nil {
		return fmt.Errorf("creating gauge filter failed: %w", err)
	}
	return nil
}
// DetermineType returns the value type to use for the named metric. An
// explicit counter/gauge mapping overrides the type carried by the metric
// itself; when both patterns match, gauge wins because it is checked last.
func (mt *MetricTypes) DetermineType(name string, m telegraf.Metric) telegraf.ValueType {
	vt := m.Type()
	if f := mt.filterCounter; f != nil && f.Match(name) {
		vt = telegraf.Counter
	}
	if f := mt.filterGauge; f != nil && f.Match(name) {
		vt = telegraf.Gauge
	}
	return vt
}
// FormatConfig holds the serializer options exposed to the user through the
// prometheus_* configuration settings.
type FormatConfig struct {
	// ExportTimestamp includes the metric timestamp on each sample.
	ExportTimestamp bool `toml:"prometheus_export_timestamp"`
	// SortMetrics sorts metric families and samples; useful for debugging.
	SortMetrics bool `toml:"prometheus_sort_metrics"`
	// StringAsLabel outputs string fields as labels; when false string
	// fields are discarded.
	StringAsLabel bool `toml:"prometheus_string_as_label"`
	// CompactEncoding defines whether to include
	// HELP metadata in Prometheus payload. Setting to true
	// helps to reduce payload size.
	CompactEncoding bool `toml:"prometheus_compact_encoding"`
	// TypeMappings forces the metric type of selected metrics.
	TypeMappings MetricTypes `toml:"prometheus_metric_types"`
}
// Serializer encodes Telegraf metrics in the Prometheus text exposition
// format according to the embedded FormatConfig.
type Serializer struct {
	FormatConfig
}
// Init prepares the serializer for use by compiling the configured
// metric-type mappings.
func (s *Serializer) Init() error {
	return s.FormatConfig.TypeMappings.Init()
}
// Serialize converts a single metric into the Prometheus text format by
// delegating to SerializeBatch with a one-element batch.
func (s *Serializer) Serialize(metric telegraf.Metric) ([]byte, error) {
	return s.SerializeBatch([]telegraf.Metric{metric})
}
// SerializeBatch converts a batch of metrics into the Prometheus text
// format. The metrics are first merged into a Collection so samples that
// belong to the same metric family (e.g. histogram buckets spread over
// several Telegraf metrics) are emitted together.
func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) {
	coll := NewCollection(s.FormatConfig)
	for _, metric := range metrics {
		coll.Add(metric, time.Now())
	}

	var buf bytes.Buffer
	// One encoder suffices for all families written to the same buffer;
	// constructing a new encoder per family (as before) was wasted work.
	enc := expfmt.NewEncoder(&buf, expfmt.NewFormat(expfmt.TypeTextPlain))
	for _, mf := range coll.GetProto() {
		if err := enc.Encode(mf); err != nil {
			return nil, err
		}
	}
	return buf.Bytes(), nil
}
// init registers this serializer with Telegraf's serializer registry under
// the "prometheus" data-format name.
func init() {
	serializers.Add("prometheus",
		func() telegraf.Serializer {
			return &Serializer{}
		},
	)
}

View file

@ -0,0 +1,786 @@
package prometheus
import (
"strings"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/serializers"
"github.com/influxdata/telegraf/testutil"
)
// TestSerialize checks single-metric serialization against golden text
// exposition output for each supported metric type and config option.
// The golden strings are compared after TrimSpace, so only leading/trailing
// whitespace of the whole payload is insignificant.
func TestSerialize(t *testing.T) {
	tests := []struct {
		name     string
		config   FormatConfig
		metric   telegraf.Metric
		expected []byte
	}{
		{
			name: "simple",
			metric: testutil.MustMetric(
				"cpu",
				map[string]string{
					"host": "example.org",
				},
				map[string]interface{}{
					"time_idle": 42.0,
				},
				time.Unix(0, 0),
			),
			expected: []byte(`
# HELP cpu_time_idle Telegraf collected metric
# TYPE cpu_time_idle untyped
cpu_time_idle{host="example.org"} 42
`),
		},
		{
			name: "prometheus input untyped",
			metric: testutil.MustMetric(
				"prometheus",
				map[string]string{
					"code":   "400",
					"method": "post",
				},
				map[string]interface{}{
					"http_requests_total": 3.0,
				},
				time.Unix(0, 0),
				telegraf.Untyped,
			),
			expected: []byte(`
# HELP http_requests_total Telegraf collected metric
# TYPE http_requests_total untyped
http_requests_total{code="400",method="post"} 3
`),
		},
		{
			name: "prometheus input counter",
			metric: testutil.MustMetric(
				"prometheus",
				map[string]string{
					"code":   "400",
					"method": "post",
				},
				map[string]interface{}{
					"http_requests_total": 3.0,
				},
				time.Unix(0, 0),
				telegraf.Counter,
			),
			expected: []byte(`
# HELP http_requests_total Telegraf collected metric
# TYPE http_requests_total counter
http_requests_total{code="400",method="post"} 3
`),
		},
		{
			name: "prometheus input gauge",
			metric: testutil.MustMetric(
				"prometheus",
				map[string]string{
					"code":   "400",
					"method": "post",
				},
				map[string]interface{}{
					"http_requests_total": 3.0,
				},
				time.Unix(0, 0),
				telegraf.Gauge,
			),
			expected: []byte(`
# HELP http_requests_total Telegraf collected metric
# TYPE http_requests_total gauge
http_requests_total{code="400",method="post"} 3
`),
		},
		// Histogram without any bucket fields: only the implicit +Inf
		// bucket is emitted, carrying the count.
		{
			name: "prometheus input histogram no buckets",
			metric: testutil.MustMetric(
				"prometheus",
				map[string]string{},
				map[string]interface{}{
					"http_request_duration_seconds_sum":   53423,
					"http_request_duration_seconds_count": 144320,
				},
				time.Unix(0, 0),
				telegraf.Histogram,
			),
			expected: []byte(`
# HELP http_request_duration_seconds Telegraf collected metric
# TYPE http_request_duration_seconds histogram
http_request_duration_seconds_bucket{le="+Inf"} 144320
http_request_duration_seconds_sum 53423
http_request_duration_seconds_count 144320
`),
		},
		// A lone bucket with no sum/count: the missing parts default to 0.
		{
			name: "prometheus input histogram only bucket",
			metric: testutil.MustMetric(
				"prometheus",
				map[string]string{
					"le": "0.5",
				},
				map[string]interface{}{
					"http_request_duration_seconds_bucket": 129389.0,
				},
				time.Unix(0, 0),
				telegraf.Histogram,
			),
			expected: []byte(`
# HELP http_request_duration_seconds Telegraf collected metric
# TYPE http_request_duration_seconds histogram
http_request_duration_seconds_bucket{le="0.5"} 129389
http_request_duration_seconds_bucket{le="+Inf"} 0
http_request_duration_seconds_sum 0
http_request_duration_seconds_count 0
`),
		},
		{
			name: "simple with timestamp",
			config: FormatConfig{
				ExportTimestamp: true,
			},
			metric: testutil.MustMetric(
				"cpu",
				map[string]string{
					"host": "example.org",
				},
				map[string]interface{}{
					"time_idle": 42.0,
				},
				time.Unix(1574279268, 0),
			),
			expected: []byte(`
# HELP cpu_time_idle Telegraf collected metric
# TYPE cpu_time_idle untyped
cpu_time_idle{host="example.org"} 42 1574279268000
`),
		},
		// CompactEncoding suppresses the HELP line.
		{
			name: "simple with CompactEncoding",
			config: FormatConfig{
				CompactEncoding: true,
			},
			metric: testutil.MustMetric(
				"cpu",
				map[string]string{
					"host": "example.org",
				},
				map[string]interface{}{
					"time_idle": 42.0,
				},
				time.Unix(1574279268, 0),
			),
			expected: []byte(`
# TYPE cpu_time_idle untyped
cpu_time_idle{host="example.org"} 42
`),
		},
		{
			name: "untyped forced to counter",
			config: FormatConfig{
				TypeMappings: MetricTypes{Counter: []string{"cpu_time_idle"}},
			},
			metric: testutil.MustMetric(
				"cpu",
				map[string]string{
					"host": "example.org",
				},
				map[string]interface{}{
					"time_idle": 42,
				},
				time.Unix(0, 0),
			),
			expected: []byte(`
# HELP cpu_time_idle Telegraf collected metric
# TYPE cpu_time_idle counter
cpu_time_idle{host="example.org"} 42
`),
		},
		{
			name: "untyped forced to gauge",
			config: FormatConfig{
				TypeMappings: MetricTypes{Gauge: []string{"cpu_time_idle"}},
			},
			metric: testutil.MustMetric(
				"cpu",
				map[string]string{
					"host": "example.org",
				},
				map[string]interface{}{
					"time_idle": 42.0,
				},
				time.Unix(0, 0),
			),
			expected: []byte(`
# HELP cpu_time_idle Telegraf collected metric
# TYPE cpu_time_idle gauge
cpu_time_idle{host="example.org"} 42
`),
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// SortMetrics is always enabled so the expected output is
			// deterministic across runs.
			s := &Serializer{
				FormatConfig{
					SortMetrics:     true,
					ExportTimestamp: tt.config.ExportTimestamp,
					StringAsLabel:   tt.config.StringAsLabel,
					CompactEncoding: tt.config.CompactEncoding,
					TypeMappings:    tt.config.TypeMappings,
				},
			}
			require.NoError(t, s.Init())
			actual, err := s.Serialize(tt.metric)
			require.NoError(t, err)
			require.Equal(t, strings.TrimSpace(string(tt.expected)),
				strings.TrimSpace(string(actual)))
		})
	}
}
// TestSerializeBatch checks batch serialization against golden text
// exposition output: family merging, histogram/summary assembly, newest-
// sample-wins behavior, and name/label sanitization. Fixes relative to the
// previous version: the summary quantile case now has a name (it was ""),
// and the runner propagates CompactEncoding/TypeMappings and calls Init()
// for consistency with TestSerialize.
func TestSerializeBatch(t *testing.T) {
	tests := []struct {
		name     string
		config   FormatConfig
		metrics  []telegraf.Metric
		expected []byte
	}{
		{
			name: "simple",
			metrics: []telegraf.Metric{
				testutil.MustMetric(
					"cpu",
					map[string]string{
						"host": "one.example.org",
					},
					map[string]interface{}{
						"time_idle": 42.0,
					},
					time.Unix(0, 0),
				),
				testutil.MustMetric(
					"cpu",
					map[string]string{
						"host": "two.example.org",
					},
					map[string]interface{}{
						"time_idle": 42.0,
					},
					time.Unix(0, 0),
				),
			},
			expected: []byte(`
# HELP cpu_time_idle Telegraf collected metric
# TYPE cpu_time_idle untyped
cpu_time_idle{host="one.example.org"} 42
cpu_time_idle{host="two.example.org"} 42
`),
		},
		{
			name: "multiple metric families",
			metrics: []telegraf.Metric{
				testutil.MustMetric(
					"cpu",
					map[string]string{
						"host": "one.example.org",
					},
					map[string]interface{}{
						"time_idle":  42.0,
						"time_guest": 42.0,
					},
					time.Unix(0, 0),
				),
			},
			expected: []byte(`
# HELP cpu_time_guest Telegraf collected metric
# TYPE cpu_time_guest untyped
cpu_time_guest{host="one.example.org"} 42
# HELP cpu_time_idle Telegraf collected metric
# TYPE cpu_time_idle untyped
cpu_time_idle{host="one.example.org"} 42
`),
		},
		// Buckets spread over several metrics are merged into one family.
		{
			name: "histogram",
			metrics: []telegraf.Metric{
				testutil.MustMetric(
					"prometheus",
					map[string]string{},
					map[string]interface{}{
						"http_request_duration_seconds_sum":   53423,
						"http_request_duration_seconds_count": 144320,
					},
					time.Unix(0, 0),
					telegraf.Histogram,
				),
				testutil.MustMetric(
					"prometheus",
					map[string]string{"le": "0.05"},
					map[string]interface{}{
						"http_request_duration_seconds_bucket": 24054.0,
					},
					time.Unix(0, 0),
					telegraf.Histogram,
				),
				testutil.MustMetric(
					"prometheus",
					map[string]string{"le": "0.1"},
					map[string]interface{}{
						"http_request_duration_seconds_bucket": 33444.0,
					},
					time.Unix(0, 0),
					telegraf.Histogram,
				),
				testutil.MustMetric(
					"prometheus",
					map[string]string{"le": "0.2"},
					map[string]interface{}{
						"http_request_duration_seconds_bucket": 100392.0,
					},
					time.Unix(0, 0),
					telegraf.Histogram,
				),
				testutil.MustMetric(
					"prometheus",
					map[string]string{"le": "0.5"},
					map[string]interface{}{
						"http_request_duration_seconds_bucket": 129389.0,
					},
					time.Unix(0, 0),
					telegraf.Histogram,
				),
				testutil.MustMetric(
					"prometheus",
					map[string]string{"le": "1.0"},
					map[string]interface{}{
						"http_request_duration_seconds_bucket": 133988.0,
					},
					time.Unix(0, 0),
					telegraf.Histogram,
				),
				testutil.MustMetric(
					"prometheus",
					map[string]string{"le": "+Inf"},
					map[string]interface{}{
						"http_request_duration_seconds_bucket": 144320.0,
					},
					time.Unix(0, 0),
					telegraf.Histogram,
				),
			},
			expected: []byte(`
# HELP http_request_duration_seconds Telegraf collected metric
# TYPE http_request_duration_seconds histogram
http_request_duration_seconds_bucket{le="0.05"} 24054
http_request_duration_seconds_bucket{le="0.1"} 33444
http_request_duration_seconds_bucket{le="0.2"} 100392
http_request_duration_seconds_bucket{le="0.5"} 129389
http_request_duration_seconds_bucket{le="1"} 133988
http_request_duration_seconds_bucket{le="+Inf"} 144320
http_request_duration_seconds_sum 53423
http_request_duration_seconds_count 144320
`),
		},
		{
			name: "summary",
			metrics: []telegraf.Metric{
				testutil.MustMetric(
					"prometheus",
					map[string]string{},
					map[string]interface{}{
						"rpc_duration_seconds_sum":   1.7560473e+07,
						"rpc_duration_seconds_count": 2693,
					},
					time.Unix(0, 0),
					telegraf.Summary,
				),
				testutil.MustMetric(
					"prometheus",
					map[string]string{"quantile": "0.01"},
					map[string]interface{}{
						"rpc_duration_seconds": 3102.0,
					},
					time.Unix(0, 0),
					telegraf.Summary,
				),
				testutil.MustMetric(
					"prometheus",
					map[string]string{"quantile": "0.05"},
					map[string]interface{}{
						"rpc_duration_seconds": 3272.0,
					},
					time.Unix(0, 0),
					telegraf.Summary,
				),
				testutil.MustMetric(
					"prometheus",
					map[string]string{"quantile": "0.5"},
					map[string]interface{}{
						"rpc_duration_seconds": 4773.0,
					},
					time.Unix(0, 0),
					telegraf.Summary,
				),
				testutil.MustMetric(
					"prometheus",
					map[string]string{"quantile": "0.9"},
					map[string]interface{}{
						"rpc_duration_seconds": 9001.0,
					},
					time.Unix(0, 0),
					telegraf.Summary,
				),
				testutil.MustMetric(
					"prometheus",
					map[string]string{"quantile": "0.99"},
					map[string]interface{}{
						"rpc_duration_seconds": 76656.0,
					},
					time.Unix(0, 0),
					telegraf.Summary,
				),
			},
			expected: []byte(`
# HELP rpc_duration_seconds Telegraf collected metric
# TYPE rpc_duration_seconds summary
rpc_duration_seconds{quantile="0.01"} 3102
rpc_duration_seconds{quantile="0.05"} 3272
rpc_duration_seconds{quantile="0.5"} 4773
rpc_duration_seconds{quantile="0.9"} 9001
rpc_duration_seconds{quantile="0.99"} 76656
rpc_duration_seconds_sum 1.7560473e+07
rpc_duration_seconds_count 2693
`),
		},
		// When the same series appears twice, the newest sample wins.
		{
			name: "newer sample",
			metrics: []telegraf.Metric{
				testutil.MustMetric(
					"cpu",
					map[string]string{},
					map[string]interface{}{
						"time_idle": 43.0,
					},
					time.Unix(1, 0),
				),
				testutil.MustMetric(
					"cpu",
					map[string]string{},
					map[string]interface{}{
						"time_idle": 42.0,
					},
					time.Unix(0, 0),
				),
			},
			expected: []byte(`
# HELP cpu_time_idle Telegraf collected metric
# TYPE cpu_time_idle untyped
cpu_time_idle 43
`),
		},
		{
			name: "colons are not replaced in metric name from measurement",
			metrics: []telegraf.Metric{
				testutil.MustMetric(
					"cpu::xyzzy",
					map[string]string{},
					map[string]interface{}{
						"time_idle": 42.0,
					},
					time.Unix(0, 0),
				),
			},
			expected: []byte(`
# HELP cpu::xyzzy_time_idle Telegraf collected metric
# TYPE cpu::xyzzy_time_idle untyped
cpu::xyzzy_time_idle 42
`),
		},
		{
			name: "colons are not replaced in metric name from field",
			metrics: []telegraf.Metric{
				testutil.MustMetric(
					"cpu",
					map[string]string{},
					map[string]interface{}{
						"time:idle": 42.0,
					},
					time.Unix(0, 0),
				),
			},
			expected: []byte(`
# HELP cpu_time:idle Telegraf collected metric
# TYPE cpu_time:idle untyped
cpu_time:idle 42
`),
		},
		{
			name: "invalid label",
			metrics: []telegraf.Metric{
				testutil.MustMetric(
					"cpu",
					map[string]string{
						"host-name": "example.org",
					},
					map[string]interface{}{
						"time_idle": 42.0,
					},
					time.Unix(0, 0),
				),
			},
			expected: []byte(`
# HELP cpu_time_idle Telegraf collected metric
# TYPE cpu_time_idle untyped
cpu_time_idle{host_name="example.org"} 42
`),
		},
		{
			name: "colons are replaced in label name",
			metrics: []telegraf.Metric{
				testutil.MustMetric(
					"cpu",
					map[string]string{
						"host:name": "example.org",
					},
					map[string]interface{}{
						"time_idle": 42.0,
					},
					time.Unix(0, 0),
				),
			},
			expected: []byte(`
# HELP cpu_time_idle Telegraf collected metric
# TYPE cpu_time_idle untyped
cpu_time_idle{host_name="example.org"} 42
`),
		},
		{
			name: "discard strings",
			metrics: []telegraf.Metric{
				testutil.MustMetric(
					"cpu",
					map[string]string{},
					map[string]interface{}{
						"time_idle": 42.0,
						"cpu":       "cpu0",
					},
					time.Unix(0, 0),
				),
			},
			expected: []byte(`
# HELP cpu_time_idle Telegraf collected metric
# TYPE cpu_time_idle untyped
cpu_time_idle 42
`),
		},
		{
			name: "string as label",
			config: FormatConfig{
				StringAsLabel: true,
			},
			metrics: []telegraf.Metric{
				testutil.MustMetric(
					"cpu",
					map[string]string{},
					map[string]interface{}{
						"time_idle": 42.0,
						"cpu":       "cpu0",
					},
					time.Unix(0, 0),
				),
			},
			expected: []byte(`
# HELP cpu_time_idle Telegraf collected metric
# TYPE cpu_time_idle untyped
cpu_time_idle{cpu="cpu0"} 42
`),
		},
		// A real tag takes precedence over a string field of the same name.
		{
			name: "string as label duplicate tag",
			config: FormatConfig{
				StringAsLabel: true,
			},
			metrics: []telegraf.Metric{
				testutil.MustMetric(
					"cpu",
					map[string]string{
						"cpu": "cpu0",
					},
					map[string]interface{}{
						"time_idle": 42.0,
						"cpu":       "cpu1",
					},
					time.Unix(0, 0),
				),
			},
			expected: []byte(`
# HELP cpu_time_idle Telegraf collected metric
# TYPE cpu_time_idle untyped
cpu_time_idle{cpu="cpu0"} 42
`),
		},
		{
			name: "replace characters when using string as label",
			config: FormatConfig{
				StringAsLabel: true,
			},
			metrics: []telegraf.Metric{
				testutil.MustMetric(
					"cpu",
					map[string]string{},
					map[string]interface{}{
						"host:name": "example.org",
						"time_idle": 42.0,
					},
					time.Unix(1574279268, 0),
				),
			},
			expected: []byte(`
# HELP cpu_time_idle Telegraf collected metric
# TYPE cpu_time_idle untyped
cpu_time_idle{host_name="example.org"} 42
`),
		},
		{
			name: "multiple fields grouping",
			metrics: []telegraf.Metric{
				testutil.MustMetric(
					"cpu",
					map[string]string{
						"cpu": "cpu0",
					},
					map[string]interface{}{
						"time_guest":  8106.04,
						"time_system": 26271.4,
						"time_user":   92904.33,
					},
					time.Unix(0, 0),
				),
				testutil.MustMetric(
					"cpu",
					map[string]string{
						"cpu": "cpu1",
					},
					map[string]interface{}{
						"time_guest":  8181.63,
						"time_system": 25351.49,
						"time_user":   96912.57,
					},
					time.Unix(0, 0),
				),
				testutil.MustMetric(
					"cpu",
					map[string]string{
						"cpu": "cpu2",
					},
					map[string]interface{}{
						"time_guest":  7470.04,
						"time_system": 24998.43,
						"time_user":   96034.08,
					},
					time.Unix(0, 0),
				),
				testutil.MustMetric(
					"cpu",
					map[string]string{
						"cpu": "cpu3",
					},
					map[string]interface{}{
						"time_guest":  7517.95,
						"time_system": 24970.82,
						"time_user":   94148,
					},
					time.Unix(0, 0),
				),
			},
			expected: []byte(`
# HELP cpu_time_guest Telegraf collected metric
# TYPE cpu_time_guest untyped
cpu_time_guest{cpu="cpu0"} 8106.04
cpu_time_guest{cpu="cpu1"} 8181.63
cpu_time_guest{cpu="cpu2"} 7470.04
cpu_time_guest{cpu="cpu3"} 7517.95
# HELP cpu_time_system Telegraf collected metric
# TYPE cpu_time_system untyped
cpu_time_system{cpu="cpu0"} 26271.4
cpu_time_system{cpu="cpu1"} 25351.49
cpu_time_system{cpu="cpu2"} 24998.43
cpu_time_system{cpu="cpu3"} 24970.82
# HELP cpu_time_user Telegraf collected metric
# TYPE cpu_time_user untyped
cpu_time_user{cpu="cpu0"} 92904.33
cpu_time_user{cpu="cpu1"} 96912.57
cpu_time_user{cpu="cpu2"} 96034.08
cpu_time_user{cpu="cpu3"} 94148
`),
		},
		{
			name: "summary with no quantile",
			metrics: []telegraf.Metric{
				testutil.MustMetric(
					"prometheus",
					map[string]string{},
					map[string]interface{}{
						"rpc_duration_seconds_sum":   1.7560473e+07,
						"rpc_duration_seconds_count": 2693,
					},
					time.Unix(0, 0),
					telegraf.Summary,
				),
			},
			expected: []byte(`
# HELP rpc_duration_seconds Telegraf collected metric
# TYPE rpc_duration_seconds summary
rpc_duration_seconds_sum 1.7560473e+07
rpc_duration_seconds_count 2693
`),
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Mirror TestSerialize: propagate the full per-case config and
			// initialize the serializer before use.
			s := &Serializer{
				FormatConfig{
					SortMetrics:     true,
					ExportTimestamp: tt.config.ExportTimestamp,
					StringAsLabel:   tt.config.StringAsLabel,
					CompactEncoding: tt.config.CompactEncoding,
					TypeMappings:    tt.config.TypeMappings,
				},
			}
			require.NoError(t, s.Init())
			actual, err := s.SerializeBatch(tt.metrics)
			require.NoError(t, err)
			require.Equal(t,
				strings.TrimSpace(string(tt.expected)),
				strings.TrimSpace(string(actual)))
		})
	}
}
// BenchmarkSerialize measures single-metric serialization throughput,
// cycling through the shared benchmark fixture metrics.
func BenchmarkSerialize(b *testing.B) {
	s := &Serializer{}
	require.NoError(b, s.Init())
	fixtures := serializers.BenchmarkMetrics(b)
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		_, err := s.Serialize(fixtures[n%len(fixtures)])
		require.NoError(b, err)
	}
}
// BenchmarkSerializeBatch measures whole-batch serialization throughput.
func BenchmarkSerializeBatch(b *testing.B) {
	s := &Serializer{}
	require.NoError(b, s.Init())
	// BenchmarkMetrics returns an array; the intermediate variable is
	// needed because an unaddressable call result cannot be sliced.
	fixtures := serializers.BenchmarkMetrics(b)
	batch := fixtures[:]
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		_, err := s.SerializeBatch(batch)
		require.NoError(b, err)
	}
}