Adding upstream version 1.34.4.

Signed-off-by: Daniel Baumann <daniel@debian.org>

parent e393c3af3f
commit 4978089aab
4963 changed files with 677545 additions and 0 deletions
New files under plugins/outputs/all/ that register every output plugin with the build system. plugins/outputs/all/all.go (new file, 1 line) contains only the package clause:

package all

Each plugin gets its own 5-line registration stub guarded by a build constraint, shown here for plugins/outputs/all/amon.go (new file, 5 lines):

//go:build !custom || outputs || outputs.amon

package all

import _ "github.com/influxdata/telegraf/plugins/outputs/amon" // register plugin

The same stub, with the plugin name substituted in the build tag and import path, is added as a new 5-line file for each of the following outputs: amqp, application_insights, azure_data_explorer, azure_monitor, bigquery, clarify, cloud_pubsub, cloudwatch, cloudwatch_logs, cratedb, datadog, discard, dynatrace, elasticsearch, event_hubs, exec, execd, file, graphite, graylog, groundwork, health, http, influxdb, influxdb_v2, instrumental, iotdb, kafka, kinesis, librato, logzio, loki, mongodb, mqtt, nats, nebius_cloud_monitoring, newrelic, nsq, opensearch, opentelemetry, opentsdb, parquet, postgresql, prometheus_client, quix, redistimeseries, remotefile, riemann, sensu, signalfx, socket_writer, sql, stackdriver, stomp, sumologic, syslog, timestream, warp10, wavefront, websocket, yandex_cloud_monitoring and zabbix.
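The build constraint on each stub is what makes plugin selection work: the file compiles into the default build (no `custom` tag), into an `outputs`-tagged build, or into a build that names the individual plugin tag. A minimal annotated sketch, using the amon stub from above — the build invocation in the comment is an assumption for illustration, not something taken from this commit:

```go
//go:build !custom || outputs || outputs.amon

// This file is compiled in when:
//   - the "custom" tag is absent (the normal, full-featured build), or
//   - the "outputs" tag is set (include every output plugin), or
//   - the "outputs.amon" tag is set (include just this output).
// A trimmed agent could therefore be built with something like
// (assumed invocation):  go build -tags "custom,outputs.amon" ./cmd/telegraf
package all

// The blank import runs the plugin package's init(), which registers
// the output under the name "amon".
import _ "github.com/influxdata/telegraf/plugins/outputs/amon" // register plugin
```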
plugins/outputs/amon/README.md (new file, 44 lines)

# Amon Output Plugin

This plugin writes metrics to the [Amon monitoring platform][amon]. It requires
a `serverkey` and `amoninstance` URL, which can be obtained [here][amon_monitoring]
for your account.

> [!IMPORTANT]
> If point values being sent cannot be converted to a `float64`, the metric is
> skipped.

⭐ Telegraf v0.2.1
🏷️ datastore
💻 all

[amon]: https://www.amon.cx
[amon_monitoring]: https://www.amon.cx/docs/monitoring/

## Global configuration options <!-- @/docs/includes/plugin_config.md -->

In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used to
modify metrics, tags, and fields or create aliases and configure ordering, etc.
See [CONFIGURATION.md][CONFIGURATION.md] for more details.

[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins

## Configuration

```toml @sample.conf
# Configuration for Amon Server to send metrics to.
[[outputs.amon]]
  ## Amon Server Key
  server_key = "my-server-key" # required.

  ## Amon Instance URL
  amon_instance = "https://youramoninstance" # required

  ## Connection timeout.
  # timeout = "5s"
```

## Conversions

Metrics are grouped by converting any `_` characters to `.` in the point name
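To make the conversion concrete, here is a small worked example based on the naming logic in plugins/outputs/amon/amon.go below (the measurement, field names and values are invented for illustration):

```text
input metric (line protocol):
  disk_io,host=a read_time=42i,write_time=7i 1525293034000000000

resulting Amon metric names and points:
  disk.io_read.time   -> [1525293034, 42]
  disk.io_write.time  -> [1525293034, 7]
```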
plugins/outputs/amon/amon.go (new file, 151 lines)

//go:generate ../../../tools/readme_config_includer/generator
package amon

import (
    "bytes"
    _ "embed"
    "encoding/json"
    "errors"
    "fmt"
    "net/http"
    "strings"
    "time"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/config"
    "github.com/influxdata/telegraf/plugins/outputs"
)

//go:embed sample.conf
var sampleConfig string

type Amon struct {
    ServerKey    string          `toml:"server_key"`
    AmonInstance string          `toml:"amon_instance"`
    Timeout      config.Duration `toml:"timeout"`
    Log          telegraf.Logger `toml:"-"`

    client *http.Client
}

type TimeSeries struct {
    Series []*Metric `json:"series"`
}

type Metric struct {
    Metric string   `json:"metric"`
    Points [1]Point `json:"metrics"`
}

type Point [2]float64

func (*Amon) SampleConfig() string {
    return sampleConfig
}

func (a *Amon) Connect() error {
    if a.ServerKey == "" || a.AmonInstance == "" {
        return errors.New("serverkey and amon_instance are required fields for amon output")
    }
    a.client = &http.Client{
        Transport: &http.Transport{
            Proxy: http.ProxyFromEnvironment,
        },
        Timeout: time.Duration(a.Timeout),
    }
    return nil
}

func (a *Amon) Write(metrics []telegraf.Metric) error {
    if len(metrics) == 0 {
        return nil
    }

    metricCounter := 0
    tempSeries := make([]*Metric, 0, len(metrics))
    for _, m := range metrics {
        mname := strings.ReplaceAll(m.Name(), "_", ".")
        if amonPts, err := buildMetrics(m); err == nil {
            for fieldName, amonPt := range amonPts {
                metric := &Metric{
                    Metric: mname + "_" + strings.ReplaceAll(fieldName, "_", "."),
                }
                metric.Points[0] = amonPt
                tempSeries = append(tempSeries, metric)
                metricCounter++
            }
        } else {
            a.Log.Infof("Unable to build Metric for %s, skipping", m.Name())
        }
    }

    ts := TimeSeries{}
    ts.Series = make([]*Metric, metricCounter)
    copy(ts.Series, tempSeries[0:])
    tsBytes, err := json.Marshal(ts)
    if err != nil {
        return fmt.Errorf("unable to marshal TimeSeries: %w", err)
    }
    req, err := http.NewRequest("POST", a.authenticatedURL(), bytes.NewBuffer(tsBytes))
    if err != nil {
        return fmt.Errorf("unable to create http.Request: %w", err)
    }
    req.Header.Add("Content-Type", "application/json")

    resp, err := a.client.Do(req)
    if err != nil {
        return fmt.Errorf("error POSTing metrics: %w", err)
    }
    defer resp.Body.Close()

    if resp.StatusCode < 200 || resp.StatusCode > 209 {
        return fmt.Errorf("received bad status code, %d", resp.StatusCode)
    }

    return nil
}

func (a *Amon) authenticatedURL() string {
    return fmt.Sprintf("%s/api/system/%s", a.AmonInstance, a.ServerKey)
}

func buildMetrics(m telegraf.Metric) (map[string]Point, error) {
    ms := make(map[string]Point)
    for k, v := range m.Fields() {
        var p Point
        if err := p.setValue(v); err != nil {
            return ms, fmt.Errorf("unable to extract value from Fields: %w", err)
        }
        p[0] = float64(m.Time().Unix())
        ms[k] = p
    }
    return ms, nil
}

func (p *Point) setValue(v interface{}) error {
    switch d := v.(type) {
    case int:
        p[1] = float64(d)
    case int32:
        p[1] = float64(d)
    case int64:
        p[1] = float64(d)
    case float32:
        p[1] = float64(d)
    case float64:
        p[1] = d
    default:
        return errors.New("undeterminable type")
    }
    return nil
}

func (*Amon) Close() error {
    return nil
}

func init() {
    outputs.Add("amon", func() telegraf.Output {
        return &Amon{}
    })
}
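For reference, a minimal self-contained sketch of the payload and endpoint this plugin produces. The struct shapes mirror `TimeSeries`, `Metric` and `Point` above and the URL shape comes from `authenticatedURL`; the concrete values, instance URL and server key are invented for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local mirrors of the plugin's payload types (see amon.go above).
type point [2]float64 // [unix timestamp, value]

type metric struct {
	Metric string   `json:"metric"`
	Points [1]point `json:"metrics"`
}

type timeSeries struct {
	Series []*metric `json:"series"`
}

func main() {
	ts := timeSeries{Series: []*metric{
		{Metric: "disk.io_read.time", Points: [1]point{{1525293034, 42}}},
	}}
	body, _ := json.Marshal(ts)

	// With amon_instance = "https://youramoninstance" and server_key = "my-server-key",
	// the plugin POSTs this JSON body to:
	//   https://youramoninstance/api/system/my-server-key
	fmt.Println(string(body))
	// -> {"series":[{"metric":"disk.io_read.time","metrics":[[1525293034,42]]}]}
}
```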
plugins/outputs/amon/amon_test.go (new file, 89 lines)

package amon
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
)
|
||||
|
||||
func TestBuildPoint(t *testing.T) {
|
||||
var tagtests = []struct {
|
||||
ptIn telegraf.Metric
|
||||
outPt Point
|
||||
err error
|
||||
}{
|
||||
{
|
||||
testutil.TestMetric(float64(0.0), "testpt"),
|
||||
Point{
|
||||
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
|
||||
0.0,
|
||||
},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
testutil.TestMetric(float64(1.0), "testpt"),
|
||||
Point{
|
||||
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
|
||||
1.0,
|
||||
},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
testutil.TestMetric(int(10), "testpt"),
|
||||
Point{
|
||||
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
|
||||
10.0,
|
||||
},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
testutil.TestMetric(int32(112345), "testpt"),
|
||||
Point{
|
||||
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
|
||||
112345.0,
|
||||
},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
testutil.TestMetric(int64(112345), "testpt"),
|
||||
Point{
|
||||
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
|
||||
112345.0,
|
||||
},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
testutil.TestMetric(float32(11234.5), "testpt"),
|
||||
Point{
|
||||
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
|
||||
11234.5,
|
||||
},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
testutil.TestMetric("11234.5", "testpt"),
|
||||
Point{
|
||||
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
|
||||
11234.5,
|
||||
},
|
||||
errors.New("unable to extract value from Fields, undeterminable type"),
|
||||
},
|
||||
}
|
||||
for _, tt := range tagtests {
|
||||
pt, err := buildMetrics(tt.ptIn)
|
||||
if err != nil && tt.err == nil {
|
||||
t.Errorf("%s: unexpected error, %+v\n", tt.ptIn.Name(), err)
|
||||
}
|
||||
if tt.err != nil && err == nil {
|
||||
t.Errorf("%s: expected an error (%s) but none returned", tt.ptIn.Name(), tt.err.Error())
|
||||
}
|
||||
if !reflect.DeepEqual(pt["value"], tt.outPt) && tt.err == nil {
|
||||
t.Errorf("%s: \nexpected %+v\ngot %+v\n",
|
||||
tt.ptIn.Name(), tt.outPt, pt["value"])
|
||||
}
|
||||
}
|
||||
}
|
plugins/outputs/amon/sample.conf (new file, 10 lines)

# Configuration for Amon Server to send metrics to.
[[outputs.amon]]
  ## Amon Server Key
  server_key = "my-server-key" # required.

  ## Amon Instance URL
  amon_instance = "https://youramoninstance" # required

  ## Connection timeout.
  # timeout = "5s"
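One practical note on this sample: `timeout` is a Go-style duration string, and the plugin code above does not set a fallback when the option is omitted, so in that case the underlying `http.Client` runs without any timeout. Setting it explicitly is a reasonable precaution; the value below is only an example:

```toml
[[outputs.amon]]
  server_key = "my-server-key"
  amon_instance = "https://youramoninstance"
  ## Abort requests that take longer than 10 seconds (example value).
  timeout = "10s"
```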
plugins/outputs/amqp/README.md (new file, 146 lines)

# AMQP Output Plugin

This plugin writes to an Advanced Message Queuing Protocol v0.9.1 broker.
A prominent implementation of this protocol is [RabbitMQ][rabbitmq].

> [!NOTE]
> This plugin does not bind the AMQP exchange to a queue.

For an introduction, check the [AMQP concepts page][amqp_concepts] and the
[RabbitMQ getting started guide][rabbitmq_getting_started].

⭐ Telegraf v0.1.9
🏷️ messaging
💻 all

[amqp_concepts]: https://www.rabbitmq.com/tutorials/amqp-concepts.html
[rabbitmq]: https://www.rabbitmq.com
[rabbitmq_getting_started]: https://www.rabbitmq.com/getstarted.html

## Global configuration options <!-- @/docs/includes/plugin_config.md -->

In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used to
modify metrics, tags, and fields or create aliases and configure ordering, etc.
See [CONFIGURATION.md][CONFIGURATION.md] for more details.

[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins

## Secret-store support

This plugin supports secrets from secret-stores for the `username` and
`password` options.
See the [secret-store documentation][SECRETSTORE] for more details on how
to use them.

[SECRETSTORE]: ../../../docs/CONFIGURATION.md#secret-store-secrets

## Configuration

```toml @sample.conf
# Publishes metrics to an AMQP broker
|
||||
[[outputs.amqp]]
|
||||
## Brokers to publish to. If multiple brokers are specified a random broker
|
||||
## will be selected anytime a connection is established. This can be
|
||||
## helpful for load balancing when not using a dedicated load balancer.
|
||||
brokers = ["amqp://localhost:5672/influxdb"]
|
||||
|
||||
## Maximum messages to send over a connection. Once this is reached, the
|
||||
## connection is closed and a new connection is made. This can be helpful for
|
||||
## load balancing when not using a dedicated load balancer.
|
||||
# max_messages = 0
|
||||
|
||||
## Exchange to declare and publish to.
|
||||
exchange = "telegraf"
|
||||
|
||||
## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash".
|
||||
# exchange_type = "topic"
|
||||
|
||||
## If true, exchange will be passively declared.
|
||||
# exchange_passive = false
|
||||
|
||||
## Exchange durability can be either "transient" or "durable".
|
||||
# exchange_durability = "durable"
|
||||
|
||||
## Additional exchange arguments.
|
||||
# exchange_arguments = { }
|
||||
# exchange_arguments = {"hash_property" = "timestamp"}
|
||||
|
||||
## Authentication credentials for the PLAIN auth_method.
|
||||
# username = ""
|
||||
# password = ""
|
||||
|
||||
## Auth method. PLAIN and EXTERNAL are supported
|
||||
## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
|
||||
## described here: https://www.rabbitmq.com/plugins.html
|
||||
# auth_method = "PLAIN"
|
||||
|
||||
## Metric tag to use as a routing key.
|
||||
## ie, if this tag exists, its value will be used as the routing key
|
||||
# routing_tag = "host"
|
||||
|
||||
## Static routing key. Used when no routing_tag is set or as a fallback
|
||||
## when the tag specified in routing tag is not found.
|
||||
# routing_key = ""
|
||||
# routing_key = "telegraf"
|
||||
|
||||
## Delivery Mode controls if a published message is persistent.
|
||||
## One of "transient" or "persistent".
|
||||
# delivery_mode = "transient"
|
||||
|
||||
## Static headers added to each published message.
|
||||
# headers = { }
|
||||
# headers = {"database" = "telegraf", "retention_policy" = "default"}
|
||||
|
||||
## Connection timeout. If not provided, will default to 5s. 0s means no
|
||||
## timeout (not recommended).
|
||||
# timeout = "5s"
|
||||
|
||||
## Optional TLS Config
|
||||
# tls_ca = "/etc/telegraf/ca.pem"
|
||||
# tls_cert = "/etc/telegraf/cert.pem"
|
||||
# tls_key = "/etc/telegraf/key.pem"
|
||||
## Use TLS but skip chain & host verification
|
||||
# insecure_skip_verify = false
|
||||
|
||||
## Optional Proxy Configuration
|
||||
# use_proxy = false
|
||||
# proxy_url = "localhost:8888"
|
||||
|
||||
## If true use batch serialization format instead of line based delimiting.
|
||||
## Only applies to data formats which are not line based such as JSON.
|
||||
## Recommended to set to true.
|
||||
# use_batch_format = false
|
||||
|
||||
## Content encoding for message payloads, can be set to "gzip" to or
|
||||
## "identity" to apply no encoding.
|
||||
##
|
||||
## Please note that when use_batch_format = false each amqp message contains only
|
||||
## a single metric, it is recommended to use compression with batch format
|
||||
## for best results.
|
||||
# content_encoding = "identity"
|
||||
|
||||
## Data format to output.
|
||||
## Each data format has its own unique set of configuration options, read
|
||||
## more about them here:
|
||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
|
||||
# data_format = "influx"
|
||||
```

### Routing

If `routing_tag` is set and the tag is defined on the metric, the value of the
tag is used as the routing key. Otherwise the value of `routing_key` is used
directly. If both are unset, the empty string is used.

Exchange types that do not use a routing key, `direct` and `header`, always use
the empty string as the routing key.

Metrics are published in batches based on the final routing key. A worked
example is shown after the Proxy notes below.

### Proxy

If you want to use a proxy, set `use_proxy = true`. This will use the system's
proxy settings to determine the proxy URL. If you need to specify a proxy URL
manually, you can do so with `proxy_url`, which overrides the system settings.
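The worked example referenced in the Routing section above — how the routing key is chosen per metric (tag names and values are invented for illustration):

```text
# configuration
routing_tag = "host"
routing_key = "telegraf"

cpu,host=web01 usage_idle=98.2   -> routing key "web01"     (tag present)
uptime value=123                 -> routing key "telegraf"  (tag missing, fallback)

# with exchange_type = "direct" or "header" the routing key is always "" (see above)
```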
plugins/outputs/amqp/amqp.go (new file, 334 lines)

//go:generate ../../../tools/readme_config_includer/generator
|
||||
package amqp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
_ "embed"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
amqp "github.com/rabbitmq/amqp091-go"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/config"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
"github.com/influxdata/telegraf/plugins/common/proxy"
|
||||
"github.com/influxdata/telegraf/plugins/common/tls"
|
||||
"github.com/influxdata/telegraf/plugins/outputs"
|
||||
)
|
||||
|
||||
//go:embed sample.conf
|
||||
var sampleConfig string
|
||||
|
||||
const (
|
||||
DefaultURL = "amqp://localhost:5672/influxdb"
|
||||
DefaultAuthMethod = "PLAIN"
|
||||
DefaultExchangeType = "topic"
|
||||
DefaultRetentionPolicy = "default"
|
||||
DefaultDatabase = "telegraf"
|
||||
)
|
||||
|
||||
type externalAuth struct{}
|
||||
|
||||
func (*externalAuth) Mechanism() string {
|
||||
return "EXTERNAL"
|
||||
}
|
||||
|
||||
func (*externalAuth) Response() string {
|
||||
return "\000"
|
||||
}
|
||||
|
||||
type AMQP struct {
|
||||
URL string `toml:"url" deprecated:"1.7.0;1.35.0;use 'brokers' instead"`
|
||||
Brokers []string `toml:"brokers"`
|
||||
Exchange string `toml:"exchange"`
|
||||
ExchangeType string `toml:"exchange_type"`
|
||||
ExchangePassive bool `toml:"exchange_passive"`
|
||||
ExchangeDurability string `toml:"exchange_durability"`
|
||||
ExchangeArguments map[string]string `toml:"exchange_arguments"`
|
||||
Username config.Secret `toml:"username"`
|
||||
Password config.Secret `toml:"password"`
|
||||
MaxMessages int `toml:"max_messages"`
|
||||
AuthMethod string `toml:"auth_method"`
|
||||
RoutingTag string `toml:"routing_tag"`
|
||||
RoutingKey string `toml:"routing_key"`
|
||||
DeliveryMode string `toml:"delivery_mode"`
|
||||
Database string `toml:"database" deprecated:"1.7.0;1.35.0;use 'headers' instead"`
|
||||
RetentionPolicy string `toml:"retention_policy" deprecated:"1.7.0;1.35.0;use 'headers' instead"`
|
||||
Precision string `toml:"precision" deprecated:"1.2.0;1.35.0;option is ignored"`
|
||||
Headers map[string]string `toml:"headers"`
|
||||
Timeout config.Duration `toml:"timeout"`
|
||||
UseBatchFormat bool `toml:"use_batch_format"`
|
||||
ContentEncoding string `toml:"content_encoding"`
|
||||
Log telegraf.Logger `toml:"-"`
|
||||
tls.ClientConfig
|
||||
proxy.TCPProxy
|
||||
|
||||
serializer telegraf.Serializer
|
||||
connect func(*ClientConfig) (Client, error)
|
||||
client Client
|
||||
config *ClientConfig
|
||||
sentMessages int
|
||||
encoder internal.ContentEncoder
|
||||
}
|
||||
|
||||
type Client interface {
|
||||
Publish(key string, body []byte) error
|
||||
Close() error
|
||||
}
|
||||
|
||||
func (*AMQP) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (q *AMQP) SetSerializer(serializer telegraf.Serializer) {
|
||||
q.serializer = serializer
|
||||
}
|
||||
|
||||
func (q *AMQP) Init() error {
|
||||
var err error
|
||||
q.config, err = q.makeClientConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
q.encoder, err = internal.NewContentEncoder(q.ContentEncoding)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (q *AMQP) Connect() error {
|
||||
var err error
|
||||
q.client, err = q.connect(q.config)
|
||||
return err
|
||||
}
|
||||
|
||||
func (q *AMQP) Close() error {
|
||||
if q.client != nil {
|
||||
return q.client.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (q *AMQP) routingKey(metric telegraf.Metric) string {
|
||||
if q.RoutingTag != "" {
|
||||
key, ok := metric.GetTag(q.RoutingTag)
|
||||
if ok {
|
||||
return key
|
||||
}
|
||||
}
|
||||
return q.RoutingKey
|
||||
}
|
||||
|
||||
func (q *AMQP) Write(metrics []telegraf.Metric) error {
|
||||
batches := make(map[string][]telegraf.Metric)
|
||||
if q.ExchangeType == "header" {
|
||||
// Since the routing_key is ignored for this exchange type send as a
|
||||
// single batch.
|
||||
batches[""] = metrics
|
||||
} else {
|
||||
for _, metric := range metrics {
|
||||
routingKey := q.routingKey(metric)
|
||||
if _, ok := batches[routingKey]; !ok {
|
||||
batches[routingKey] = make([]telegraf.Metric, 0)
|
||||
}
|
||||
|
||||
batches[routingKey] = append(batches[routingKey], metric)
|
||||
}
|
||||
}
|
||||
|
||||
first := true
|
||||
for key, metrics := range batches {
|
||||
body, err := q.serialize(metrics)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
body, err = q.encoder.Encode(body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = q.publish(key, body)
|
||||
if err != nil {
|
||||
// If this is the first attempt to publish and the connection is
|
||||
// closed, try to reconnect and retry once.
|
||||
|
||||
var aerr *amqp.Error
|
||||
if first && errors.As(err, &aerr) && errors.Is(aerr, amqp.ErrClosed) {
|
||||
q.client = nil
|
||||
err := q.publish(key, body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else if q.client != nil {
|
||||
if err := q.client.Close(); err != nil {
|
||||
q.Log.Errorf("Closing connection failed: %v", err)
|
||||
}
|
||||
q.client = nil
|
||||
return err
|
||||
}
|
||||
}
|
||||
first = false
|
||||
}
|
||||
|
||||
if q.sentMessages >= q.MaxMessages && q.MaxMessages > 0 {
|
||||
q.Log.Debug("Sent MaxMessages; closing connection")
|
||||
if err := q.client.Close(); err != nil {
|
||||
q.Log.Errorf("Closing connection failed: %v", err)
|
||||
}
|
||||
q.client = nil
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (q *AMQP) publish(key string, body []byte) error {
|
||||
if q.client == nil {
|
||||
client, err := q.connect(q.config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
q.sentMessages = 0
|
||||
q.client = client
|
||||
}
|
||||
|
||||
err := q.client.Publish(key, body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
q.sentMessages++
|
||||
return nil
|
||||
}
|
||||
|
||||
func (q *AMQP) serialize(metrics []telegraf.Metric) ([]byte, error) {
|
||||
if q.UseBatchFormat {
|
||||
return q.serializer.SerializeBatch(metrics)
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
for _, metric := range metrics {
|
||||
octets, err := q.serializer.Serialize(metric)
|
||||
if err != nil {
|
||||
q.Log.Debugf("Could not serialize metric: %v", err)
|
||||
continue
|
||||
}
|
||||
buf.Write(octets)
|
||||
}
|
||||
body := buf.Bytes()
|
||||
return body, nil
|
||||
}
|
||||
|
||||
func (q *AMQP) makeClientConfig() (*ClientConfig, error) {
|
||||
clientConfig := &ClientConfig{
|
||||
exchange: q.Exchange,
|
||||
exchangeType: q.ExchangeType,
|
||||
exchangePassive: q.ExchangePassive,
|
||||
encoding: q.ContentEncoding,
|
||||
timeout: time.Duration(q.Timeout),
|
||||
log: q.Log,
|
||||
}
|
||||
|
||||
switch q.ExchangeDurability {
|
||||
case "transient":
|
||||
clientConfig.exchangeDurable = false
|
||||
default:
|
||||
clientConfig.exchangeDurable = true
|
||||
}
|
||||
|
||||
clientConfig.brokers = q.Brokers
|
||||
if len(clientConfig.brokers) == 0 {
|
||||
clientConfig.brokers = []string{q.URL}
|
||||
}
|
||||
|
||||
switch q.DeliveryMode {
|
||||
case "transient":
|
||||
clientConfig.deliveryMode = amqp.Transient
|
||||
case "persistent":
|
||||
clientConfig.deliveryMode = amqp.Persistent
|
||||
default:
|
||||
clientConfig.deliveryMode = amqp.Transient
|
||||
}
|
||||
|
||||
if len(q.Headers) > 0 {
|
||||
clientConfig.headers = make(amqp.Table, len(q.Headers))
|
||||
for k, v := range q.Headers {
|
||||
clientConfig.headers[k] = v
|
||||
}
|
||||
} else {
|
||||
// Copy deprecated fields into message header
|
||||
clientConfig.headers = amqp.Table{
|
||||
"database": q.Database,
|
||||
"retention_policy": q.RetentionPolicy,
|
||||
}
|
||||
}
|
||||
|
||||
if len(q.ExchangeArguments) > 0 {
|
||||
clientConfig.exchangeArguments = make(amqp.Table, len(q.ExchangeArguments))
|
||||
for k, v := range q.ExchangeArguments {
|
||||
clientConfig.exchangeArguments[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
tlsConfig, err := q.ClientConfig.TLSConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
clientConfig.tlsConfig = tlsConfig
|
||||
|
||||
dialer, err := q.TCPProxy.Proxy()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
clientConfig.dialer = dialer
|
||||
|
||||
var auth []amqp.Authentication
|
||||
if strings.EqualFold(q.AuthMethod, "EXTERNAL") {
|
||||
auth = []amqp.Authentication{&externalAuth{}}
|
||||
} else if !q.Username.Empty() || !q.Password.Empty() {
|
||||
username, err := q.Username.Get()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("getting username failed: %w", err)
|
||||
}
|
||||
defer username.Destroy()
|
||||
password, err := q.Password.Get()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("getting password failed: %w", err)
|
||||
}
|
||||
defer password.Destroy()
|
||||
auth = []amqp.Authentication{
|
||||
&amqp.PlainAuth{
|
||||
Username: username.String(),
|
||||
Password: password.String(),
|
||||
},
|
||||
}
|
||||
}
|
||||
clientConfig.auth = auth
|
||||
|
||||
return clientConfig, nil
|
||||
}
|
||||
|
||||
func connect(clientConfig *ClientConfig) (Client, error) {
|
||||
return newClient(clientConfig)
|
||||
}
|
||||
|
||||
func init() {
|
||||
outputs.Add("amqp", func() telegraf.Output {
|
||||
return &AMQP{
|
||||
Brokers: []string{DefaultURL},
|
||||
ExchangeType: DefaultExchangeType,
|
||||
AuthMethod: DefaultAuthMethod,
|
||||
Headers: map[string]string{
|
||||
"database": DefaultDatabase,
|
||||
"retention_policy": DefaultRetentionPolicy,
|
||||
},
|
||||
Timeout: config.Duration(time.Second * 5),
|
||||
connect: connect,
|
||||
}
|
||||
})
|
||||
}
|
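The `Write` method in amqp.go above retries a publish exactly once when the first attempt fails because the broker closed the connection (`amqp.ErrClosed`). A condensed, self-contained sketch of that retry pattern, with the actual client replaced by a stub for illustration:

```go
package main

import (
	"errors"
	"fmt"

	amqp "github.com/rabbitmq/amqp091-go"
)

var attempts int

// publishOnce stands in for client.Publish in the plugin; the first call
// simulates a connection that the broker has already closed.
func publishOnce(key string, body []byte) error {
	attempts++
	if attempts == 1 {
		return amqp.ErrClosed
	}
	return nil
}

func main() {
	first := true
	err := publishOnce("telegraf", []byte("cpu usage_idle=98"))
	if err != nil {
		var aerr *amqp.Error
		// Same check as in Write above: only the first batch is retried, and only
		// when the failure is the library's "channel/connection is not open" error.
		if first && errors.As(err, &aerr) && errors.Is(aerr, amqp.ErrClosed) {
			err = publishOnce("telegraf", []byte("cpu usage_idle=98"))
		}
	}
	fmt.Printf("published after %d attempt(s), err = %v\n", attempts, err)
}
```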
plugins/outputs/amqp/amqp_test.go (new file, 160 lines)

package amqp
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
amqp "github.com/rabbitmq/amqp091-go"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/influxdata/telegraf/config"
|
||||
)
|
||||
|
||||
type MockClient struct {
|
||||
PublishF func() error
|
||||
CloseF func() error
|
||||
|
||||
PublishCallCount int
|
||||
CloseCallCount int
|
||||
}
|
||||
|
||||
func (c *MockClient) Publish(string, []byte) error {
|
||||
c.PublishCallCount++
|
||||
return c.PublishF()
|
||||
}
|
||||
|
||||
func (c *MockClient) Close() error {
|
||||
c.CloseCallCount++
|
||||
return c.CloseF()
|
||||
}
|
||||
|
||||
func NewMockClient() Client {
|
||||
return &MockClient{
|
||||
PublishF: func() error {
|
||||
return nil
|
||||
},
|
||||
CloseF: func() error {
|
||||
return nil
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestConnect(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
output *AMQP
|
||||
errFunc func(t *testing.T, output *AMQP, err error)
|
||||
}{
|
||||
{
|
||||
name: "defaults",
|
||||
output: &AMQP{
|
||||
Brokers: []string{DefaultURL},
|
||||
ExchangeType: DefaultExchangeType,
|
||||
ExchangeDurability: "durable",
|
||||
AuthMethod: DefaultAuthMethod,
|
||||
Headers: map[string]string{
|
||||
"database": DefaultDatabase,
|
||||
"retention_policy": DefaultRetentionPolicy,
|
||||
},
|
||||
Timeout: config.Duration(time.Second * 5),
|
||||
connect: func(_ *ClientConfig) (Client, error) {
|
||||
return NewMockClient(), nil
|
||||
},
|
||||
},
|
||||
errFunc: func(t *testing.T, output *AMQP, err error) {
|
||||
cfg := output.config
|
||||
require.Equal(t, []string{DefaultURL}, cfg.brokers)
|
||||
require.Empty(t, cfg.exchange)
|
||||
require.Equal(t, "topic", cfg.exchangeType)
|
||||
require.False(t, cfg.exchangePassive)
|
||||
require.True(t, cfg.exchangeDurable)
|
||||
require.Equal(t, amqp.Table(nil), cfg.exchangeArguments)
|
||||
require.Equal(t, amqp.Table{
|
||||
"database": DefaultDatabase,
|
||||
"retention_policy": DefaultRetentionPolicy,
|
||||
}, cfg.headers)
|
||||
require.Equal(t, amqp.Transient, cfg.deliveryMode)
|
||||
require.NoError(t, err)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "headers overrides deprecated dbrp",
|
||||
output: &AMQP{
|
||||
Headers: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
connect: func(_ *ClientConfig) (Client, error) {
|
||||
return NewMockClient(), nil
|
||||
},
|
||||
},
|
||||
errFunc: func(t *testing.T, output *AMQP, err error) {
|
||||
cfg := output.config
|
||||
require.Equal(t, amqp.Table{
|
||||
"foo": "bar",
|
||||
}, cfg.headers)
|
||||
require.NoError(t, err)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "exchange args",
|
||||
output: &AMQP{
|
||||
ExchangeArguments: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
connect: func(_ *ClientConfig) (Client, error) {
|
||||
return NewMockClient(), nil
|
||||
},
|
||||
},
|
||||
errFunc: func(t *testing.T, output *AMQP, err error) {
|
||||
cfg := output.config
|
||||
require.Equal(t, amqp.Table{
|
||||
"foo": "bar",
|
||||
}, cfg.exchangeArguments)
|
||||
require.NoError(t, err)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "username password",
|
||||
output: &AMQP{
|
||||
URL: "amqp://foo:bar@localhost",
|
||||
Username: config.NewSecret([]byte("telegraf")),
|
||||
Password: config.NewSecret([]byte("pa$$word")),
|
||||
connect: func(_ *ClientConfig) (Client, error) {
|
||||
return NewMockClient(), nil
|
||||
},
|
||||
},
|
||||
errFunc: func(t *testing.T, output *AMQP, err error) {
|
||||
cfg := output.config
|
||||
require.Equal(t, []amqp.Authentication{
|
||||
&amqp.PlainAuth{
|
||||
Username: "telegraf",
|
||||
Password: "pa$$word",
|
||||
},
|
||||
}, cfg.auth)
|
||||
|
||||
require.NoError(t, err)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "url support",
|
||||
output: &AMQP{
|
||||
URL: DefaultURL,
|
||||
connect: func(_ *ClientConfig) (Client, error) {
|
||||
return NewMockClient(), nil
|
||||
},
|
||||
},
|
||||
errFunc: func(t *testing.T, output *AMQP, err error) {
|
||||
cfg := output.config
|
||||
require.Equal(t, []string{DefaultURL}, cfg.brokers)
|
||||
require.NoError(t, err)
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
require.NoError(t, tt.output.Init())
|
||||
err := tt.output.Connect()
|
||||
tt.errFunc(t, tt.output, err)
|
||||
})
|
||||
}
|
||||
}
|
plugins/outputs/amqp/client.go (new file, 146 lines)

package amqp
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
amqp "github.com/rabbitmq/amqp091-go"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/plugins/common/proxy"
|
||||
)
|
||||
|
||||
type ClientConfig struct {
|
||||
brokers []string
|
||||
exchange string
|
||||
exchangeType string
|
||||
exchangePassive bool
|
||||
exchangeDurable bool
|
||||
exchangeArguments amqp.Table
|
||||
encoding string
|
||||
headers amqp.Table
|
||||
deliveryMode uint8
|
||||
tlsConfig *tls.Config
|
||||
timeout time.Duration
|
||||
auth []amqp.Authentication
|
||||
dialer *proxy.ProxiedDialer
|
||||
log telegraf.Logger
|
||||
}
|
||||
|
||||
type client struct {
|
||||
conn *amqp.Connection
|
||||
channel *amqp.Channel
|
||||
config *ClientConfig
|
||||
}
|
||||
|
||||
// newClient opens a connection to one of the brokers at random
|
||||
func newClient(config *ClientConfig) (*client, error) {
|
||||
client := &client{
|
||||
config: config,
|
||||
}
|
||||
|
||||
p := rand.Perm(len(config.brokers))
|
||||
for _, n := range p {
|
||||
broker := config.brokers[n]
|
||||
config.log.Debugf("Connecting to %q", broker)
|
||||
conn, err := amqp.DialConfig(
|
||||
broker, amqp.Config{
|
||||
TLSClientConfig: config.tlsConfig,
|
||||
SASL: config.auth, // if nil, it will be PLAIN taken from url
|
||||
Dial: func(network, addr string) (net.Conn, error) {
|
||||
return config.dialer.DialTimeout(network, addr, config.timeout)
|
||||
},
|
||||
})
|
||||
if err == nil {
|
||||
client.conn = conn
|
||||
config.log.Debugf("Connected to %q", broker)
|
||||
break
|
||||
}
|
||||
config.log.Debugf("Error connecting to %q - %v", broker, err.Error())
|
||||
}
|
||||
|
||||
if client.conn == nil {
|
||||
return nil, errors.New("could not connect to any broker")
|
||||
}
|
||||
|
||||
channel, err := client.conn.Channel()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error opening channel: %w", err)
|
||||
}
|
||||
client.channel = channel
|
||||
|
||||
err = client.DeclareExchange()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return client, nil
|
||||
}
|
||||
|
||||
func (c *client) DeclareExchange() error {
|
||||
if c.config.exchange == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
var err error
|
||||
if c.config.exchangePassive {
|
||||
err = c.channel.ExchangeDeclarePassive(
|
||||
c.config.exchange,
|
||||
c.config.exchangeType,
|
||||
c.config.exchangeDurable,
|
||||
false, // delete when unused
|
||||
false, // internal
|
||||
false, // no-wait
|
||||
c.config.exchangeArguments,
|
||||
)
|
||||
} else {
|
||||
err = c.channel.ExchangeDeclare(
|
||||
c.config.exchange,
|
||||
c.config.exchangeType,
|
||||
c.config.exchangeDurable,
|
||||
false, // delete when unused
|
||||
false, // internal
|
||||
false, // no-wait
|
||||
c.config.exchangeArguments,
|
||||
)
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("error declaring exchange: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *client) Publish(key string, body []byte) error {
|
||||
// Note that since the channel is not in confirm mode, the absence of
|
||||
// an error does not indicate successful delivery.
|
||||
return c.channel.PublishWithContext(
|
||||
context.Background(),
|
||||
c.config.exchange, // exchange
|
||||
key, // routing key
|
||||
false, // mandatory
|
||||
false, // immediate
|
||||
amqp.Publishing{
|
||||
Headers: c.config.headers,
|
||||
ContentType: "text/plain",
|
||||
ContentEncoding: c.config.encoding,
|
||||
Body: body,
|
||||
DeliveryMode: c.config.deliveryMode,
|
||||
})
|
||||
}
|
||||
|
||||
func (c *client) Close() error {
|
||||
if c.conn == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
err := c.conn.Close()
|
||||
if err != nil && !errors.Is(err, amqp.ErrClosed) {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
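A note on the caveat in `Publish` above: the channel is never put into confirm mode, so a nil error only means the message was handed to the connection, not that the broker accepted it. If stronger delivery guarantees were needed, one option would be publisher confirms — shown here purely as a sketch against the same rabbitmq/amqp091-go API, not as something this plugin does:

```go
package amqpexample

import (
	"context"
	"fmt"

	amqp "github.com/rabbitmq/amqp091-go"
)

// publishWithConfirm is a sketch of publishing with publisher confirms enabled.
// The Telegraf amqp output does NOT do this; it publishes without confirms.
func publishWithConfirm(ch *amqp.Channel, exchange, key string, body []byte) error {
	if err := ch.Confirm(false); err != nil { // put the channel into confirm mode
		return err
	}
	confirms := ch.NotifyPublish(make(chan amqp.Confirmation, 1))

	err := ch.PublishWithContext(context.Background(), exchange, key, false, false,
		amqp.Publishing{ContentType: "text/plain", Body: body})
	if err != nil {
		return err
	}

	// Block until the broker acks or nacks the delivery.
	if c := <-confirms; !c.Ack {
		return fmt.Errorf("broker nacked delivery %d", c.DeliveryTag)
	}
	return nil
}
```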
plugins/outputs/amqp/sample.conf (new file, 87 lines)

# Publishes metrics to an AMQP broker
|
||||
[[outputs.amqp]]
|
||||
## Brokers to publish to. If multiple brokers are specified a random broker
|
||||
## will be selected anytime a connection is established. This can be
|
||||
## helpful for load balancing when not using a dedicated load balancer.
|
||||
brokers = ["amqp://localhost:5672/influxdb"]
|
||||
|
||||
## Maximum messages to send over a connection. Once this is reached, the
|
||||
## connection is closed and a new connection is made. This can be helpful for
|
||||
## load balancing when not using a dedicated load balancer.
|
||||
# max_messages = 0
|
||||
|
||||
## Exchange to declare and publish to.
|
||||
exchange = "telegraf"
|
||||
|
||||
## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash".
|
||||
# exchange_type = "topic"
|
||||
|
||||
## If true, exchange will be passively declared.
|
||||
# exchange_passive = false
|
||||
|
||||
## Exchange durability can be either "transient" or "durable".
|
||||
# exchange_durability = "durable"
|
||||
|
||||
## Additional exchange arguments.
|
||||
# exchange_arguments = { }
|
||||
# exchange_arguments = {"hash_property" = "timestamp"}
|
||||
|
||||
## Authentication credentials for the PLAIN auth_method.
|
||||
# username = ""
|
||||
# password = ""
|
||||
|
||||
## Auth method. PLAIN and EXTERNAL are supported
|
||||
## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
|
||||
## described here: https://www.rabbitmq.com/plugins.html
|
||||
# auth_method = "PLAIN"
|
||||
|
||||
## Metric tag to use as a routing key.
|
||||
## ie, if this tag exists, its value will be used as the routing key
|
||||
# routing_tag = "host"
|
||||
|
||||
## Static routing key. Used when no routing_tag is set or as a fallback
|
||||
## when the tag specified in routing tag is not found.
|
||||
# routing_key = ""
|
||||
# routing_key = "telegraf"
|
||||
|
||||
## Delivery Mode controls if a published message is persistent.
|
||||
## One of "transient" or "persistent".
|
||||
# delivery_mode = "transient"
|
||||
|
||||
## Static headers added to each published message.
|
||||
# headers = { }
|
||||
# headers = {"database" = "telegraf", "retention_policy" = "default"}
|
||||
|
||||
## Connection timeout. If not provided, will default to 5s. 0s means no
|
||||
## timeout (not recommended).
|
||||
# timeout = "5s"
|
||||
|
||||
## Optional TLS Config
|
||||
# tls_ca = "/etc/telegraf/ca.pem"
|
||||
# tls_cert = "/etc/telegraf/cert.pem"
|
||||
# tls_key = "/etc/telegraf/key.pem"
|
||||
## Use TLS but skip chain & host verification
|
||||
# insecure_skip_verify = false
|
||||
|
||||
## Optional Proxy Configuration
|
||||
# use_proxy = false
|
||||
# proxy_url = "localhost:8888"
|
||||
|
||||
## If true use batch serialization format instead of line based delimiting.
|
||||
## Only applies to data formats which are not line based such as JSON.
|
||||
## Recommended to set to true.
|
||||
# use_batch_format = false
|
||||
|
||||
## Content encoding for message payloads, can be set to "gzip" to or
|
||||
## "identity" to apply no encoding.
|
||||
##
|
||||
## Please note that when use_batch_format = false each amqp message contains only
|
||||
## a single metric, it is recommended to use compression with batch format
|
||||
## for best results.
|
||||
# content_encoding = "identity"
|
||||
|
||||
## Data format to output.
|
||||
## Each data format has its own unique set of configuration options, read
|
||||
## more about them here:
|
||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
|
||||
# data_format = "influx"
|
69
plugins/outputs/application_insights/README.md
Normal file
69
plugins/outputs/application_insights/README.md
Normal file
|
@ -0,0 +1,69 @@
|
|||
# Azure Application Insights Output Plugin

This plugin writes metrics to the [Azure Application Insights][insights]
service.

⭐ Telegraf v1.7.0
🏷️ applications, cloud
💻 all

[insights]: https://azure.microsoft.com/en-us/services/application-insights/

## Global configuration options <!-- @/docs/includes/plugin_config.md -->

In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used to
modify metrics, tags, and fields or create aliases and configure ordering, etc.
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.

[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins

## Configuration

```toml @sample.conf
# Send metrics to Azure Application Insights
[[outputs.application_insights]]
  ## Instrumentation key of the Application Insights resource.
  instrumentation_key = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxx"

  ## Regions that require endpoint modification https://docs.microsoft.com/en-us/azure/azure-monitor/app/custom-endpoints
  # endpoint_url = "https://dc.services.visualstudio.com/v2/track"

  ## Timeout for closing (default: 5s).
  # timeout = "5s"

  ## Enable additional diagnostic logging.
  # enable_diagnostic_logging = false

  ## NOTE: Due to the way TOML is parsed, tables must be at the END of the
  ## plugin definition, otherwise additional config options are read as part of
  ## the table

  ## Context Tag Sources add Application Insights context tags to a tag value.
  ##
  ## For list of allowed context tag keys see:
  ## https://github.com/microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go
  # [outputs.application_insights.context_tag_sources]
  #   "ai.cloud.role" = "kubernetes_container_name"
  #   "ai.cloud.roleInstance" = "kubernetes_pod_name"
```
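
The `context_tag_sources` table maps an Application Insights context tag key to
the Telegraf metric tag whose value should fill it. As a rough illustration
(the map contents and helper name below are made up, not the plugin's code),
the lookup boils down to:

```go
package main

import "fmt"

// contextTagSources mirrors the [outputs.application_insights.context_tag_sources]
// table: context tag key -> Telegraf metric tag to read the value from.
var contextTagSources = map[string]string{
	"ai.cloud.role":         "kubernetes_container_name",
	"ai.cloud.roleInstance": "kubernetes_pod_name",
}

// resolveContextTags returns the context tags that can be filled from the
// metric's tags; sources whose tag is missing are simply skipped.
func resolveContextTags(metricTags map[string]string) map[string]string {
	out := make(map[string]string, len(contextTagSources))
	for contextTag, sourceTag := range contextTagSources {
		if v, ok := metricTags[sourceTag]; ok {
			out[contextTag] = v
		}
	}
	return out
}

func main() {
	tags := map[string]string{"kubernetes_container_name": "atcsvc", "host": "node-1"}
	fmt.Println(resolveContextTags(tags)) // map[ai.cloud.role:atcsvc]
}
```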

## Metric Encoding

For each field an Application Insights Telemetry record is created, named based
on the measurement name and field.

**Example:** Create the telemetry records `foo_first` and `foo_second`:

```text
foo,host=a first=42,second=43 1525293034000000000
```

In the special case of a single field named `value`, a single telemetry record
is created, named using only the measurement name.

**Example:** Create a telemetry record `bar`:

```text
bar,host=a value=42 1525293034000000000
```
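
As a sketch of the naming rule above, a hypothetical helper (not part of the
plugin) could compute the telemetry names like this:

```go
package main

import "fmt"

// telemetryNames applies the encoding rule: a lone "value" field maps to the
// measurement name, every other field maps to "<measurement>_<field>".
func telemetryNames(measurement string, fields []string) []string {
	if len(fields) == 1 && fields[0] == "value" {
		return []string{measurement}
	}
	names := make([]string, 0, len(fields))
	for _, f := range fields {
		names = append(names, measurement+"_"+f)
	}
	return names
}

func main() {
	fmt.Println(telemetryNames("foo", []string{"first", "second"})) // [foo_first foo_second]
	fmt.Println(telemetryNames("bar", []string{"value"}))           // [bar]
}
```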
321
plugins/outputs/application_insights/application_insights.go
Normal file
|
@ -0,0 +1,321 @@
|
|||
//go:generate ../../../tools/readme_config_includer/generator
|
||||
package application_insights
|
||||
|
||||
import (
|
||||
_ "embed"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/microsoft/ApplicationInsights-Go/appinsights"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/config"
|
||||
"github.com/influxdata/telegraf/plugins/outputs"
|
||||
)
|
||||
|
||||
//go:embed sample.conf
|
||||
var sampleConfig string
|
||||
|
||||
type TelemetryTransmitter interface {
|
||||
Track(appinsights.Telemetry)
|
||||
Close() <-chan struct{}
|
||||
}
|
||||
|
||||
type DiagnosticsMessageSubscriber interface {
|
||||
Subscribe(appinsights.DiagnosticsMessageHandler) appinsights.DiagnosticsMessageListener
|
||||
}
|
||||
|
||||
type ApplicationInsights struct {
|
||||
InstrumentationKey string `toml:"instrumentation_key"`
|
||||
EndpointURL string `toml:"endpoint_url"`
|
||||
Timeout config.Duration `toml:"timeout"`
|
||||
EnableDiagnosticLogging bool `toml:"enable_diagnostic_logging"`
|
||||
ContextTagSources map[string]string `toml:"context_tag_sources"`
|
||||
Log telegraf.Logger `toml:"-"`
|
||||
|
||||
diagMsgSubscriber DiagnosticsMessageSubscriber
|
||||
transmitter TelemetryTransmitter
|
||||
diagMsgListener appinsights.DiagnosticsMessageListener
|
||||
}
|
||||
|
||||
func (*ApplicationInsights) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (a *ApplicationInsights) Connect() error {
|
||||
if a.InstrumentationKey == "" {
|
||||
return errors.New("instrumentation key is required")
|
||||
}
|
||||
|
||||
if a.transmitter == nil {
|
||||
a.transmitter = NewTransmitter(a.InstrumentationKey, a.EndpointURL)
|
||||
}
|
||||
|
||||
if a.EnableDiagnosticLogging && a.diagMsgSubscriber != nil {
|
||||
a.diagMsgListener = a.diagMsgSubscriber.Subscribe(func(msg string) error {
|
||||
a.Log.Info(msg)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *ApplicationInsights) Write(metrics []telegraf.Metric) error {
|
||||
for _, metric := range metrics {
|
||||
allMetricTelemetry := a.createTelemetry(metric)
|
||||
for _, telemetry := range allMetricTelemetry {
|
||||
a.transmitter.Track(telemetry)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *ApplicationInsights) Close() error {
|
||||
if a.diagMsgListener != nil {
|
||||
// We want to listen to diagnostic messages during closing
|
||||
// That is why we stop listening only after Close() ends (or a timeout occurs)
|
||||
defer a.diagMsgListener.Remove()
|
||||
}
|
||||
|
||||
if a.transmitter == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
select {
|
||||
case <-a.transmitter.Close():
|
||||
a.Log.Info("Closed")
|
||||
case <-time.After(time.Duration(a.Timeout)):
|
||||
a.Log.Warnf("Close operation timed out after %v", time.Duration(a.Timeout))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *ApplicationInsights) createTelemetry(metric telegraf.Metric) []appinsights.Telemetry {
|
||||
aggregateTelemetry, usedFields := a.createAggregateMetricTelemetry(metric)
|
||||
if aggregateTelemetry != nil {
|
||||
telemetry := a.createTelemetryForUnusedFields(metric, usedFields)
|
||||
telemetry = append(telemetry, aggregateTelemetry)
|
||||
return telemetry
|
||||
}
|
||||
|
||||
fields := metric.Fields()
|
||||
if len(fields) == 1 && metric.FieldList()[0].Key == "value" {
|
||||
// Just use metric name as the telemetry name
|
||||
telemetry := a.createSimpleMetricTelemetry(metric, "value", false)
|
||||
if telemetry != nil {
|
||||
return []appinsights.Telemetry{telemetry}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// AppInsights does not support multi-dimensional metrics at the moment, so we need to disambiguate resulting telemetry
|
||||
// by adding field name as the telemetry name suffix
|
||||
return a.createTelemetryForUnusedFields(metric, nil)
|
||||
}
|
||||
|
||||
func (a *ApplicationInsights) createSimpleMetricTelemetry(
|
||||
metric telegraf.Metric,
|
||||
fieldName string,
|
||||
useFieldNameInTelemetryName bool,
|
||||
) *appinsights.MetricTelemetry {
|
||||
telemetryValue, err := getFloat64TelemetryPropertyValue([]string{fieldName}, metric, nil)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
var telemetryName string
|
||||
if useFieldNameInTelemetryName {
|
||||
telemetryName = metric.Name() + "_" + fieldName
|
||||
} else {
|
||||
telemetryName = metric.Name()
|
||||
}
|
||||
telemetry := appinsights.NewMetricTelemetry(telemetryName, telemetryValue)
|
||||
telemetry.Properties = metric.Tags()
|
||||
a.addContextTags(metric, telemetry)
|
||||
telemetry.Timestamp = metric.Time()
|
||||
return telemetry
|
||||
}
|
||||
|
||||
func (a *ApplicationInsights) createAggregateMetricTelemetry(metric telegraf.Metric) (*appinsights.AggregateMetricTelemetry, []string) {
|
||||
usedFields := make([]string, 0, 6) // We will use up to 6 fields
|
||||
|
||||
// Get the sum of all individual measurements(mandatory property)
|
||||
telemetryValue, err := getFloat64TelemetryPropertyValue([]string{"sum", "value"}, metric, &usedFields)
|
||||
if err != nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Get the count of measurements (mandatory property)
|
||||
telemetryCount, err := getIntTelemetryPropertyValue([]string{"count", "samples"}, metric, &usedFields)
|
||||
if err != nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
telemetry := appinsights.NewAggregateMetricTelemetry(metric.Name())
|
||||
telemetry.Value = telemetryValue
|
||||
telemetry.Count = telemetryCount
|
||||
telemetry.Properties = metric.Tags()
|
||||
a.addContextTags(metric, telemetry)
|
||||
telemetry.Timestamp = metric.Time()
|
||||
|
||||
// We attempt to set min, max, variance and stddev fields but do not really care if they are not present--
|
||||
// they are not essential for aggregate metric.
|
||||
// By convention AppInsights prefers stddev over variance, so to be consistent, we test for stddev after testing for variance.
|
||||
//nolint:errcheck // see above
|
||||
telemetry.Min, _ = getFloat64TelemetryPropertyValue([]string{"min"}, metric, &usedFields)
|
||||
//nolint:errcheck // see above
|
||||
telemetry.Max, _ = getFloat64TelemetryPropertyValue([]string{"max"}, metric, &usedFields)
|
||||
//nolint:errcheck // see above
|
||||
telemetry.Variance, _ = getFloat64TelemetryPropertyValue([]string{"variance"}, metric, &usedFields)
|
||||
//nolint:errcheck // see above
|
||||
telemetry.StdDev, _ = getFloat64TelemetryPropertyValue([]string{"stddev"}, metric, &usedFields)
|
||||
|
||||
return telemetry, usedFields
|
||||
}
|
||||
|
||||
func (a *ApplicationInsights) createTelemetryForUnusedFields(metric telegraf.Metric, usedFields []string) []appinsights.Telemetry {
|
||||
fields := metric.Fields()
|
||||
retval := make([]appinsights.Telemetry, 0, len(fields))
|
||||
|
||||
for fieldName := range fields {
|
||||
if contains(usedFields, fieldName) {
|
||||
continue
|
||||
}
|
||||
|
||||
telemetry := a.createSimpleMetricTelemetry(metric, fieldName, true)
|
||||
if telemetry != nil {
|
||||
retval = append(retval, telemetry)
|
||||
}
|
||||
}
|
||||
|
||||
return retval
|
||||
}
|
||||
|
||||
func (a *ApplicationInsights) addContextTags(metric telegraf.Metric, telemetry appinsights.Telemetry) {
|
||||
for contextTagName, tagSourceName := range a.ContextTagSources {
|
||||
if contextTagValue, found := metric.GetTag(tagSourceName); found {
|
||||
telemetry.ContextTags()[contextTagName] = contextTagValue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func getFloat64TelemetryPropertyValue(
|
||||
candidateFields []string,
|
||||
metric telegraf.Metric,
|
||||
usedFields *[]string,
|
||||
) (float64, error) {
|
||||
for _, fieldName := range candidateFields {
|
||||
fieldValue, found := metric.GetField(fieldName)
|
||||
if !found {
|
||||
continue
|
||||
}
|
||||
|
||||
metricValue, err := toFloat64(fieldValue)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if usedFields != nil {
|
||||
*usedFields = append(*usedFields, fieldName)
|
||||
}
|
||||
|
||||
return metricValue, nil
|
||||
}
|
||||
|
||||
return 0.0, errors.New("no field from the candidate list was found in the metric")
|
||||
}
|
||||
|
||||
func getIntTelemetryPropertyValue(
|
||||
candidateFields []string,
|
||||
metric telegraf.Metric,
|
||||
usedFields *[]string,
|
||||
) (int, error) {
|
||||
for _, fieldName := range candidateFields {
|
||||
fieldValue, found := metric.GetField(fieldName)
|
||||
if !found {
|
||||
continue
|
||||
}
|
||||
|
||||
metricValue, err := toInt(fieldValue)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if usedFields != nil {
|
||||
*usedFields = append(*usedFields, fieldName)
|
||||
}
|
||||
|
||||
return metricValue, nil
|
||||
}
|
||||
|
||||
return 0, errors.New("no field from the candidate list was found in the metric")
|
||||
}
|
||||
|
||||
func contains(set []string, val string) bool {
|
||||
for _, elem := range set {
|
||||
if elem == val {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func toFloat64(value interface{}) (float64, error) {
|
||||
// Out of all Golang numerical types, Telegraf only uses int64, uint64 and float64 for fields
|
||||
switch v := value.(type) {
|
||||
case int64:
|
||||
return float64(v), nil
|
||||
case uint64:
|
||||
return float64(v), nil
|
||||
case float64:
|
||||
return v, nil
|
||||
}
|
||||
|
||||
return 0.0, fmt.Errorf("[%s] cannot be converted to a float64 value", value)
|
||||
}
|
||||
|
||||
func toInt(value interface{}) (int, error) {
|
||||
// Out of all Golang numerical types, Telegraf only uses int64, uint64 and float64 for fields
|
||||
switch v := value.(type) {
|
||||
case uint64:
|
||||
if strconv.IntSize == 32 {
|
||||
if v > math.MaxInt32 {
|
||||
return 0, fmt.Errorf("value [%d] out of range of 32-bit integers", v)
|
||||
}
|
||||
} else {
|
||||
if v > math.MaxInt64 {
|
||||
return 0, fmt.Errorf("value [%d] out of range of 64-bit integers", v)
|
||||
}
|
||||
}
|
||||
|
||||
return int(v), nil
|
||||
|
||||
case int64:
|
||||
if strconv.IntSize == 32 {
|
||||
if v > math.MaxInt32 || v < math.MinInt32 {
|
||||
return 0, fmt.Errorf("value [%d] out of range of 32-bit integers", v)
|
||||
}
|
||||
}
|
||||
|
||||
return int(v), nil
|
||||
}
|
||||
|
||||
return 0, fmt.Errorf("[%s] cannot be converted to an int value", value)
|
||||
}
|
||||
|
||||
func init() {
|
||||
outputs.Add("application_insights", func() telegraf.Output {
|
||||
return &ApplicationInsights{
|
||||
Timeout: config.Duration(time.Second * 5),
|
||||
diagMsgSubscriber: diagnosticsMessageSubscriber{},
|
||||
// It is very common to set Cloud.RoleName and Cloud.RoleInstance context properties, hence initial capacity of two
|
||||
ContextTagSources: make(map[string]string, 2),
|
||||
}
|
||||
})
|
||||
}
|
|
@ -0,0 +1,462 @@
|
|||
package application_insights
|
||||
|
||||
import (
|
||||
"math"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/microsoft/ApplicationInsights-Go/appinsights"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/config"
|
||||
"github.com/influxdata/telegraf/metric"
|
||||
"github.com/influxdata/telegraf/plugins/outputs/application_insights/mocks"
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
)
|
||||
|
||||
func TestConnectFailsIfNoIkey(t *testing.T) {
|
||||
transmitter := new(mocks.Transmitter)
|
||||
transmitter.On("Close").Return(closed)
|
||||
|
||||
ai := ApplicationInsights{
|
||||
transmitter: transmitter,
|
||||
// Very long timeout to ensure we do not rely on timeouts for closing the transmitter
|
||||
Timeout: config.Duration(time.Hour),
|
||||
Log: testutil.Logger{},
|
||||
}
|
||||
|
||||
err := ai.Connect()
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
func TestOutputCloseTimesOut(t *testing.T) {
|
||||
transmitter := new(mocks.Transmitter)
|
||||
transmitter.On("Close").Return(unfinished)
|
||||
|
||||
ai := ApplicationInsights{
|
||||
transmitter: transmitter,
|
||||
Timeout: config.Duration(time.Millisecond * 50),
|
||||
Log: testutil.Logger{},
|
||||
}
|
||||
|
||||
err := ai.Close()
|
||||
require.NoError(t, err)
|
||||
transmitter.AssertCalled(t, "Close")
|
||||
}
|
||||
|
||||
func TestCloseRemovesDiagMsgListener(t *testing.T) {
|
||||
transmitter := new(mocks.Transmitter)
|
||||
transmitter.On("Close").Return(closed)
|
||||
|
||||
diagMsgListener := new(mocks.DiagnosticsMessageListener)
|
||||
diagMsgListener.On("Remove")
|
||||
|
||||
diagMsgSubscriber := new(mocks.DiagnosticsMessageSubscriber)
|
||||
diagMsgSubscriber.
|
||||
On("Subscribe", mock.AnythingOfType("appinsights.DiagnosticsMessageHandler")).
|
||||
Return(diagMsgListener)
|
||||
|
||||
ai := ApplicationInsights{
|
||||
transmitter: transmitter,
|
||||
Timeout: config.Duration(time.Hour),
|
||||
EnableDiagnosticLogging: true,
|
||||
diagMsgSubscriber: diagMsgSubscriber,
|
||||
InstrumentationKey: "1234", // Fake, but necessary to enable tracking
|
||||
Log: testutil.Logger{},
|
||||
}
|
||||
|
||||
err := ai.Connect()
|
||||
require.NoError(t, err)
|
||||
diagMsgSubscriber.AssertCalled(t, "Subscribe", mock.AnythingOfType("appinsights.DiagnosticsMessageHandler"))
|
||||
|
||||
err = ai.Close()
|
||||
require.NoError(t, err)
|
||||
transmitter.AssertCalled(t, "Close")
|
||||
diagMsgListener.AssertCalled(t, "Remove")
|
||||
}
|
||||
|
||||
func TestAggregateMetricCreated(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
fields map[string]interface{}
|
||||
valueField string
|
||||
countField string
|
||||
additionalMetricValueFields []string
|
||||
}{
|
||||
{"value and count", map[string]interface{}{"value": 16.5, "count": 23}, "value", "count", nil},
|
||||
{"value and samples", map[string]interface{}{"value": 16.5, "samples": 23}, "value", "samples", nil},
|
||||
{"sum and count", map[string]interface{}{"sum": 16.5, "count": 23}, "sum", "count", nil},
|
||||
{"sum and samples", map[string]interface{}{"samples": 23, "sum": 16.5}, "sum", "samples", nil},
|
||||
{"value and count, sum is wrong type", map[string]interface{}{"sum": "J23", "value": 16.5, "count": 23}, "value", "count", nil},
|
||||
{
|
||||
"with aggregates",
|
||||
map[string]interface{}{
|
||||
"value": 16.5,
|
||||
"count": 23,
|
||||
"min": -2.1,
|
||||
"max": 34,
|
||||
"stddev": 3.4,
|
||||
},
|
||||
"value",
|
||||
"count",
|
||||
nil,
|
||||
},
|
||||
{
|
||||
"some aggregates with invalid values",
|
||||
map[string]interface{}{
|
||||
"value": 16.5,
|
||||
"count": 23,
|
||||
"min": "min",
|
||||
"max": []float64{3.4, 5.6},
|
||||
"stddev": struct {
|
||||
name string
|
||||
value float64
|
||||
}{"delta", 7.0},
|
||||
},
|
||||
"value",
|
||||
"count",
|
||||
nil,
|
||||
},
|
||||
{
|
||||
"aggregate with additional fields",
|
||||
map[string]interface{}{"value": 16.5, "samples": 23, "alpha": -34e12, "bravo": -3, "charlie": "charlie"},
|
||||
"value",
|
||||
"samples",
|
||||
[]string{"alpha", "bravo"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tf := func(t *testing.T) {
|
||||
now := time.Now().UTC()
|
||||
|
||||
transmitter := new(mocks.Transmitter)
|
||||
transmitter.On("Track", mock.Anything)
|
||||
metricName := "ShouldBeAggregateMetric"
|
||||
|
||||
m := metric.New(
|
||||
metricName,
|
||||
nil, // tags
|
||||
tt.fields,
|
||||
now,
|
||||
)
|
||||
|
||||
ai := ApplicationInsights{
|
||||
transmitter: transmitter,
|
||||
InstrumentationKey: "1234", // Fake, but necessary to enable tracking
|
||||
Log: testutil.Logger{},
|
||||
}
|
||||
|
||||
err := ai.Connect()
|
||||
require.NoError(t, err)
|
||||
|
||||
mSet := []telegraf.Metric{m}
|
||||
err = ai.Write(mSet)
|
||||
require.NoError(t, err)
|
||||
transmitter.AssertNumberOfCalls(t, "Track", 1+len(tt.additionalMetricValueFields))
|
||||
var pAggregateTelemetry *appinsights.AggregateMetricTelemetry
|
||||
require.IsType(
|
||||
t,
|
||||
pAggregateTelemetry,
|
||||
transmitter.Calls[len(transmitter.Calls)-1].Arguments.Get(0),
|
||||
"Expected last telemetry to be AggregateMetricTelemetry",
|
||||
)
|
||||
aggregateTelemetry := transmitter.Calls[len(transmitter.Calls)-1].Arguments.Get(0).(*appinsights.AggregateMetricTelemetry)
|
||||
verifyAggregateTelemetry(t, m, tt.valueField, tt.countField, aggregateTelemetry)
|
||||
|
||||
verifyAdditionalTelemetry(t, m, transmitter, tt.additionalMetricValueFields, metricName)
|
||||
}
|
||||
|
||||
t.Run(tt.name, tf)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimpleMetricCreated(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
fields map[string]interface{}
|
||||
primaryMetricValueField string
|
||||
additionalMetricValueFields []string
|
||||
}{
|
||||
{"just a single value field", map[string]interface{}{"value": 16.5}, "value", nil},
|
||||
{"single field not named value", map[string]interface{}{"first": 32.9}, "first", nil},
|
||||
{"value but no count", map[string]interface{}{"value": 16.5, "other": "bulba"}, "", []string{"value"}},
|
||||
{"count but no value", map[string]interface{}{"v1": "v1Val", "count": 23}, "", []string{"count"}},
|
||||
{"neither value nor count", map[string]interface{}{"v1": "alpha", "v2": 45.8}, "", []string{"v2"}},
|
||||
{"value is of wrong type", map[string]interface{}{"value": "alpha", "count": 15}, "", []string{"count"}},
|
||||
{"count is of wrong type", map[string]interface{}{"value": 23.77, "count": 7.5}, "", []string{"count", "value"}},
|
||||
{"count is out of range", map[string]interface{}{"value": -98.45e4, "count": float64(math.MaxUint64 - uint64(20))}, "", []string{"value", "count"}},
|
||||
{
|
||||
"several additional fields",
|
||||
map[string]interface{}{"alpha": 10, "bravo": "bravo", "charlie": 30, "delta": 40.7},
|
||||
"",
|
||||
[]string{"alpha", "charlie", "delta"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tf := func(t *testing.T) {
|
||||
now := time.Now().UTC()
|
||||
|
||||
transmitter := new(mocks.Transmitter)
|
||||
transmitter.On("Track", mock.Anything)
|
||||
metricName := "ShouldBeSimpleMetric"
|
||||
|
||||
m := metric.New(
|
||||
metricName,
|
||||
nil, // tags
|
||||
tt.fields,
|
||||
now,
|
||||
)
|
||||
|
||||
ai := ApplicationInsights{
|
||||
transmitter: transmitter,
|
||||
InstrumentationKey: "1234", // Fake, but necessary to enable tracking
|
||||
Log: testutil.Logger{},
|
||||
}
|
||||
|
||||
err := ai.Connect()
|
||||
require.NoError(t, err)
|
||||
|
||||
mSet := []telegraf.Metric{m}
|
||||
err = ai.Write(mSet)
|
||||
require.NoError(t, err)
|
||||
|
||||
expectedNumberOfCalls := len(tt.additionalMetricValueFields)
|
||||
if tt.primaryMetricValueField != "" {
|
||||
expectedNumberOfCalls++
|
||||
}
|
||||
|
||||
transmitter.AssertNumberOfCalls(t, "Track", expectedNumberOfCalls)
|
||||
if tt.primaryMetricValueField != "" {
|
||||
var pMetricTelemetry *appinsights.MetricTelemetry
|
||||
require.IsType(t, pMetricTelemetry, transmitter.Calls[0].Arguments.Get(0), "First created telemetry should be simple MetricTelemetry")
|
||||
metricTelemetry := transmitter.Calls[0].Arguments.Get(0).(*appinsights.MetricTelemetry)
|
||||
|
||||
var expectedTelemetryName string
|
||||
if tt.primaryMetricValueField == "value" {
|
||||
expectedTelemetryName = m.Name()
|
||||
} else {
|
||||
expectedTelemetryName = m.Name() + "_" + tt.primaryMetricValueField
|
||||
}
|
||||
verifySimpleTelemetry(t, m, tt.primaryMetricValueField, expectedTelemetryName, metricTelemetry)
|
||||
}
|
||||
|
||||
verifyAdditionalTelemetry(t, m, transmitter, tt.additionalMetricValueFields, metricName)
|
||||
}
|
||||
|
||||
t.Run(tt.name, tf)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTagsAppliedToTelemetry(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
fields map[string]interface{}
|
||||
tags map[string]string
|
||||
metricValueFields []string
|
||||
}{
|
||||
{
|
||||
"value but no count",
|
||||
map[string]interface{}{"value": 16.5, "alpha": 3.5, "bravo": 17},
|
||||
map[string]string{"alpha": "a tag is not a field", "charlie": "charlie"},
|
||||
[]string{"value", "alpha", "bravo"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tf := func(t *testing.T) {
|
||||
now := time.Now().UTC()
|
||||
|
||||
transmitter := new(mocks.Transmitter)
|
||||
transmitter.On("Track", mock.Anything)
|
||||
metricName := "ShouldBeSimpleMetric"
|
||||
|
||||
m := metric.New(
|
||||
metricName,
|
||||
tt.tags,
|
||||
tt.fields,
|
||||
now,
|
||||
)
|
||||
|
||||
ai := ApplicationInsights{
|
||||
transmitter: transmitter,
|
||||
InstrumentationKey: "1234", // Fake, but necessary to enable tracking
|
||||
Log: testutil.Logger{},
|
||||
}
|
||||
|
||||
err := ai.Connect()
|
||||
require.NoError(t, err)
|
||||
|
||||
mSet := []telegraf.Metric{m}
|
||||
err = ai.Write(mSet)
|
||||
require.NoError(t, err)
|
||||
transmitter.AssertNumberOfCalls(t, "Track", len(tt.metricValueFields))
|
||||
transmitter.AssertCalled(t, "Track", mock.AnythingOfType("*appinsights.MetricTelemetry"))
|
||||
|
||||
// Will verify that all original tags are present in telemetry.Properties map
|
||||
verifyAdditionalTelemetry(t, m, transmitter, tt.metricValueFields, metricName)
|
||||
}
|
||||
|
||||
t.Run(tt.name, tf)
|
||||
}
|
||||
}
|
||||
|
||||
func TestContextTagsSetOnSimpleTelemetry(t *testing.T) {
|
||||
now := time.Now().UTC()
|
||||
|
||||
transmitter := new(mocks.Transmitter)
|
||||
transmitter.On("Track", mock.Anything)
|
||||
|
||||
m := metric.New(
|
||||
"SimpleMetric",
|
||||
map[string]string{"kubernetes_container_name": "atcsvc", "kubernetes_pod_name": "bunkie17554"},
|
||||
map[string]interface{}{"value": 23.0},
|
||||
now,
|
||||
)
|
||||
|
||||
ai := ApplicationInsights{
|
||||
transmitter: transmitter,
|
||||
InstrumentationKey: "1234", // Fake, but necessary to enable tracking
|
||||
ContextTagSources: map[string]string{
|
||||
"ai.cloud.role": "kubernetes_container_name",
|
||||
"ai.cloud.roleInstance": "kubernetes_pod_name",
|
||||
"ai.user.id": "nonexistent",
|
||||
},
|
||||
Log: testutil.Logger{},
|
||||
}
|
||||
|
||||
err := ai.Connect()
|
||||
require.NoError(t, err)
|
||||
|
||||
mSet := []telegraf.Metric{m}
|
||||
err = ai.Write(mSet)
|
||||
require.NoError(t, err)
|
||||
transmitter.AssertNumberOfCalls(t, "Track", 1)
|
||||
metricTelemetry := transmitter.Calls[0].Arguments.Get(0).(*appinsights.MetricTelemetry)
|
||||
cloudTags := metricTelemetry.Tags.Cloud()
|
||||
require.Equal(t, "atcsvc", cloudTags.GetRole())
|
||||
require.Equal(t, "bunkie17554", cloudTags.GetRoleInstance())
|
||||
}
|
||||
|
||||
func TestContextTagsSetOnAggregateTelemetry(t *testing.T) {
|
||||
now := time.Now().UTC()
|
||||
|
||||
transmitter := new(mocks.Transmitter)
|
||||
transmitter.On("Track", mock.Anything)
|
||||
|
||||
m := metric.New(
|
||||
"AggregateMetric",
|
||||
map[string]string{"kubernetes_container_name": "atcsvc", "kubernetes_pod_name": "bunkie17554"},
|
||||
map[string]interface{}{"value": 23.0, "count": 5},
|
||||
now,
|
||||
)
|
||||
|
||||
ai := ApplicationInsights{
|
||||
transmitter: transmitter,
|
||||
InstrumentationKey: "1234", // Fake, but necessary to enable tracking
|
||||
ContextTagSources: map[string]string{
|
||||
"ai.cloud.role": "kubernetes_container_name",
|
||||
"ai.cloud.roleInstance": "kubernetes_pod_name",
|
||||
"ai.user.id": "nonexistent",
|
||||
},
|
||||
Log: testutil.Logger{},
|
||||
}
|
||||
|
||||
err := ai.Connect()
|
||||
require.NoError(t, err)
|
||||
|
||||
mSet := []telegraf.Metric{m}
|
||||
err = ai.Write(mSet)
|
||||
require.NoError(t, err)
|
||||
transmitter.AssertNumberOfCalls(t, "Track", 1)
|
||||
metricTelemetry := transmitter.Calls[0].Arguments.Get(0).(*appinsights.AggregateMetricTelemetry)
|
||||
cloudTags := metricTelemetry.Tags.Cloud()
|
||||
require.Equal(t, "atcsvc", cloudTags.GetRole())
|
||||
require.Equal(t, "bunkie17554", cloudTags.GetRoleInstance())
|
||||
}
|
||||
|
||||
func closed() <-chan struct{} {
|
||||
closed := make(chan struct{})
|
||||
close(closed)
|
||||
return closed
|
||||
}
|
||||
|
||||
func unfinished() <-chan struct{} {
|
||||
unfinished := make(chan struct{})
|
||||
return unfinished
|
||||
}
|
||||
|
||||
func verifyAggregateTelemetry(t *testing.T, m telegraf.Metric, valueField, countField string, telemetry *appinsights.AggregateMetricTelemetry) {
|
||||
verifyAggregateField := func(fieldName string, telemetryValue float64) {
|
||||
metricRawFieldValue, found := m.Fields()[fieldName]
|
||||
if !found {
|
||||
return
|
||||
}
|
||||
|
||||
if _, err := toFloat64(metricRawFieldValue); err == nil {
|
||||
require.InDelta(t, metricRawFieldValue, telemetryValue, testutil.DefaultDelta, "Telemetry property %s does not match the metric field", fieldName)
|
||||
}
|
||||
}
|
||||
require.Equal(t, m.Name(), telemetry.Name, "Telemetry name should be the same as metric name")
|
||||
require.InDelta(t, m.Fields()[valueField], telemetry.Value, testutil.DefaultDelta, "Telemetry value does not match metric value field")
|
||||
require.EqualValues(t, m.Fields()[countField], telemetry.Count, "Telemetry sample count does not match metric sample count field")
|
||||
verifyAggregateField("min", telemetry.Min)
|
||||
verifyAggregateField("max", telemetry.Max)
|
||||
verifyAggregateField("stdev", telemetry.StdDev)
|
||||
verifyAggregateField("variance", telemetry.Variance)
|
||||
require.Equal(t, m.Time(), telemetry.Timestamp, "Telemetry and metric timestamps do not match")
|
||||
assertMapContains(t, m.Tags(), telemetry.Properties)
|
||||
}
|
||||
|
||||
func verifySimpleTelemetry(t *testing.T, m telegraf.Metric, valueField, expectedTelemetryName string, telemetry *appinsights.MetricTelemetry) {
|
||||
require.Equal(t, expectedTelemetryName, telemetry.Name, "Telemetry name is not what was expected")
|
||||
require.InDelta(t, m.Fields()[valueField], telemetry.Value, testutil.DefaultDelta, "Telemetry value does not match metric value field")
|
||||
require.Equal(t, m.Time(), telemetry.Timestamp, "Telemetry and metric timestamps do not match")
|
||||
assertMapContains(t, m.Tags(), telemetry.Properties)
|
||||
}
|
||||
|
||||
func verifyAdditionalTelemetry(
|
||||
t *testing.T,
|
||||
m telegraf.Metric,
|
||||
transmitter *mocks.Transmitter,
|
||||
additionalMetricValueFields []string,
|
||||
telemetryNamePrefix string,
|
||||
) {
|
||||
for _, fieldName := range additionalMetricValueFields {
|
||||
expectedTelemetryName := telemetryNamePrefix + "_" + fieldName
|
||||
telemetry := findTransmittedTelemetry(transmitter, expectedTelemetryName)
|
||||
require.NotNil(t, telemetry, "Expected telemetry named %s to be created, but could not find it", expectedTelemetryName)
|
||||
if telemetry != nil {
|
||||
verifySimpleTelemetry(t, m, fieldName, expectedTelemetryName, telemetry)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func findTransmittedTelemetry(transmitter *mocks.Transmitter, telemetryName string) *appinsights.MetricTelemetry {
|
||||
for _, call := range transmitter.Calls {
|
||||
telemetry, isMetricTelemetry := call.Arguments.Get(0).(*appinsights.MetricTelemetry)
|
||||
if isMetricTelemetry && telemetry.Name == telemetryName {
|
||||
return telemetry
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func assertMapContains(t *testing.T, expected, actual map[string]string) {
|
||||
if expected == nil && actual == nil {
|
||||
return
|
||||
}
|
||||
|
||||
require.NotNil(t, expected, "Maps not equal: expected is nil but actual is not")
|
||||
require.NotNil(t, actual, "Maps not equal: actual is nil but expected is not")
|
||||
|
||||
for k, v := range expected {
|
||||
av, ok := actual[k]
|
||||
require.True(t, ok, "Actual map does not contain a value for key %q", k)
|
||||
require.Equal(t, v, av, "The expected value for key %q is %q but the actual value is %q", k, v, av)
|
||||
}
|
||||
}
|
|
@ -0,0 +1,12 @@
|
|||
package application_insights
|
||||
|
||||
import (
|
||||
"github.com/microsoft/ApplicationInsights-Go/appinsights"
|
||||
)
|
||||
|
||||
type diagnosticsMessageSubscriber struct {
|
||||
}
|
||||
|
||||
func (diagnosticsMessageSubscriber) Subscribe(handler appinsights.DiagnosticsMessageHandler) appinsights.DiagnosticsMessageListener {
|
||||
return appinsights.NewDiagnosticsMessageListener(handler)
|
||||
}
|
|
@ -0,0 +1,12 @@
|
|||
package mocks
|
||||
|
||||
import "github.com/stretchr/testify/mock"
|
||||
|
||||
// DiagnosticsMessageListener is an autogenerated mock type for the DiagnosticsMessageListener type
|
||||
type DiagnosticsMessageListener struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
func (_m *DiagnosticsMessageListener) Remove() {
|
||||
_m.Called()
|
||||
}
|
|
@ -0,0 +1,28 @@
|
|||
// Code generated by mockery v1.0.0. DO NOT EDIT.
|
||||
package mocks
|
||||
|
||||
import (
|
||||
"github.com/microsoft/ApplicationInsights-Go/appinsights"
|
||||
"github.com/stretchr/testify/mock"
|
||||
)
|
||||
|
||||
// DiagnosticsMessageSubscriber is an autogenerated mock type for the DiagnosticsMessageSubscriber type
|
||||
type DiagnosticsMessageSubscriber struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
// Subscribe provides a mock function with given fields: _a0
|
||||
func (_m *DiagnosticsMessageSubscriber) Subscribe(_a0 appinsights.DiagnosticsMessageHandler) appinsights.DiagnosticsMessageListener {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 appinsights.DiagnosticsMessageListener
|
||||
if rf, ok := ret.Get(0).(func(appinsights.DiagnosticsMessageHandler) appinsights.DiagnosticsMessageListener); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(appinsights.DiagnosticsMessageListener)
|
||||
}
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
33
plugins/outputs/application_insights/mocks/transmitter.go
Normal file
|
@ -0,0 +1,33 @@
|
|||
// Code generated by mockery v1.0.0. DO NOT EDIT.
|
||||
package mocks
|
||||
|
||||
import (
|
||||
"github.com/microsoft/ApplicationInsights-Go/appinsights"
|
||||
"github.com/stretchr/testify/mock"
|
||||
)
|
||||
|
||||
// Transmitter is an autogenerated mock type for the Transmitter type
|
||||
type Transmitter struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
// Close provides a mock function with given fields:
|
||||
func (_m *Transmitter) Close() <-chan struct{} {
|
||||
ret := _m.Called()
|
||||
|
||||
var r0 <-chan struct{}
|
||||
if rf, ok := ret.Get(0).(func() <-chan struct{}); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(<-chan struct{})
|
||||
}
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// Track provides a mock function with given fields: _a0
|
||||
func (_m *Transmitter) Track(_a0 appinsights.Telemetry) {
|
||||
_m.Called(_a0)
|
||||
}
|
25
plugins/outputs/application_insights/sample.conf
Normal file
|
@ -0,0 +1,25 @@
|
|||
# Send metrics to Azure Application Insights
|
||||
[[outputs.application_insights]]
|
||||
## Instrumentation key of the Application Insights resource.
|
||||
instrumentation_key = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxx"
|
||||
|
||||
## Regions that require endpoint modification https://docs.microsoft.com/en-us/azure/azure-monitor/app/custom-endpoints
|
||||
# endpoint_url = "https://dc.services.visualstudio.com/v2/track"
|
||||
|
||||
## Timeout for closing (default: 5s).
|
||||
# timeout = "5s"
|
||||
|
||||
## Enable additional diagnostic logging.
|
||||
# enable_diagnostic_logging = false
|
||||
|
||||
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
|
||||
## plugin definition, otherwise additional config options are read as part of
|
||||
## the table
|
||||
|
||||
## Context Tag Sources add Application Insights context tags to a tag value.
|
||||
##
|
||||
## For list of allowed context tag keys see:
|
||||
## https://github.com/microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go
|
||||
# [outputs.application_insights.context_tag_sources]
|
||||
# "ai.cloud.role" = "kubernetes_container_name"
|
||||
# "ai.cloud.roleInstance" = "kubernetes_pod_name"
|
27
plugins/outputs/application_insights/transmitter.go
Normal file
|
@ -0,0 +1,27 @@
|
|||
package application_insights
|
||||
|
||||
import (
|
||||
"github.com/microsoft/ApplicationInsights-Go/appinsights"
|
||||
)
|
||||
|
||||
type Transmitter struct {
|
||||
client appinsights.TelemetryClient
|
||||
}
|
||||
|
||||
func NewTransmitter(ikey, endpointURL string) *Transmitter {
|
||||
if len(endpointURL) == 0 {
|
||||
return &Transmitter{client: appinsights.NewTelemetryClient(ikey)}
|
||||
}
|
||||
|
||||
telemetryConfig := appinsights.NewTelemetryConfiguration(ikey)
|
||||
telemetryConfig.EndpointUrl = endpointURL
|
||||
return &Transmitter{client: appinsights.NewTelemetryClientFromConfig(telemetryConfig)}
|
||||
}
|
||||
|
||||
func (t *Transmitter) Track(telemetry appinsights.Telemetry) {
|
||||
t.client.Track(telemetry)
|
||||
}
|
||||
|
||||
func (t *Transmitter) Close() <-chan struct{} {
|
||||
return t.client.Channel().Close(0)
|
||||
}
|
300
plugins/outputs/azure_data_explorer/README.md
Normal file
|
@ -0,0 +1,300 @@
|
|||
# Azure Data Explorer Output Plugin
|
||||
|
||||
This plugin writes metrics to the [Azure Data Explorer][data_explorer],
|
||||
[Azure Synapse Data Explorer][synapse], and
|
||||
[Real time analytics in Fabric][fabric] services.
|
||||
|
||||
Azure Data Explorer is a distributed, columnar store, purpose built for any
|
||||
type of logs, metrics and time series data.
|
||||
|
||||
⭐ Telegraf v1.20.0
|
||||
🏷️ cloud, datastore
|
||||
💻 all
|
||||
|
||||
[data_explorer]: https://docs.microsoft.com/en-us/azure/data-explorer
|
||||
[synapse]: https://docs.microsoft.com/en-us/azure/synapse-analytics/data-explorer/data-explorer-overview
|
||||
[fabric]: https://learn.microsoft.com/en-us/fabric/real-time-analytics/overview
|
||||
|
||||
## Pre-requisites
|
||||
|
||||
- [Create Azure Data Explorer cluster and
|
||||
database](https://docs.microsoft.com/en-us/azure/data-explorer/create-cluster-database-portal)
|
||||
- VM/compute or container to host Telegraf - it could be hosted locally where an
|
||||
app/service to be monitored is deployed or remotely on a dedicated monitoring
|
||||
compute/container.
|
||||
|
||||
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
|
||||
|
||||
In addition to the plugin-specific configuration settings, plugins support
|
||||
additional global and plugin configuration settings. These settings are used to
|
||||
modify metrics, tags, and fields or create aliases and configure ordering, etc.
|
||||
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
|
||||
|
||||
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
|
||||
|
||||
## Configuration
|
||||
|
||||
```toml @sample.conf
|
||||
# Sends metrics to Azure Data Explorer
|
||||
[[outputs.azure_data_explorer]]
|
||||
## The URI property of the Azure Data Explorer resource on Azure
|
||||
## ex: endpoint_url = https://myadxresource.australiasoutheast.kusto.windows.net
|
||||
endpoint_url = ""
|
||||
|
||||
## The Azure Data Explorer database that the metrics will be ingested into.
|
||||
## The plugin will NOT generate this database automatically, it's expected that this database already exists before ingestion.
|
||||
## ex: "exampledatabase"
|
||||
database = ""
|
||||
|
||||
## Timeout for Azure Data Explorer operations
|
||||
# timeout = "20s"
|
||||
|
||||
## Type of metrics grouping used when pushing to Azure Data Explorer.
|
||||
## Default is "TablePerMetric" for one table per different metric.
|
||||
## For more information, please check the plugin README.
|
||||
# metrics_grouping_type = "TablePerMetric"
|
||||
|
||||
## Name of the single table to store all the metrics (Only needed if metrics_grouping_type is "SingleTable").
|
||||
# table_name = ""
|
||||
|
||||
## Creates tables and relevant mapping if set to true (default).
|
||||
## Skips table and mapping creation if set to false, this is useful for running Telegraf with the lowest possible permissions i.e. table ingestor role.
|
||||
# create_tables = true
|
||||
|
||||
## Ingestion method to use.
|
||||
## Available options are
|
||||
## - managed -- streaming ingestion with fallback to batched ingestion or the "queued" method below
|
||||
## - queued -- queue up metrics data and process sequentially
|
||||
# ingestion_type = "queued"
|
||||
```
|
||||
|
||||
## Metrics Grouping
|
||||
|
||||
Metrics can be grouped in two ways to be sent to Azure Data Explorer. To specify
|
||||
which metric grouping type the plugin should use, the respective value should be
|
||||
given to the `metrics_grouping_type` in the config file. If no value is given to
|
||||
`metrics_grouping_type`, by default, the metrics will be grouped using
|
||||
`TablePerMetric`.
|
||||
|
||||
### TablePerMetric
|
||||
|
||||
The plugin will group the metrics by the metric name, and will send each group
|
||||
of metrics to an Azure Data Explorer table. If the table doesn't exist the
|
||||
plugin will create the table, if the table exists then the plugin will try to
|
||||
merge the Telegraf metric schema to the existing table. For more information
|
||||
about the merge process check the [`.create-merge` documentation][create-merge].
|
||||
|
||||
The table name will match the `name` property of the metric, this means that the
|
||||
name of the metric should comply with the Azure Data Explorer table naming
|
||||
constraints in case you plan to add a prefix to the metric name.
|
||||
|
||||
[create-merge]: https://docs.microsoft.com/en-us/azure/data-explorer/kusto/management/create-merge-table-command
|
||||
|
||||
### SingleTable
|
||||
|
||||
The plugin will send all the metrics received to a single Azure Data Explorer
|
||||
table. The name of the table must be supplied via `table_name` in the config
|
||||
file. If the table doesn't exist the plugin will create the table, if the table
|
||||
exists then the plugin will try to merge the Telegraf metric schema to the
|
||||
existing table. For more information about the merge process check the
|
||||
[`.create-merge` documentation][create-merge].
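
As a rough sketch of the difference between the two modes (the names and types
below are illustrative, not the plugin's actual implementation), the grouping
amounts to choosing the target table per metric:

```go
package main

import "fmt"

// groupForIngestion returns a map of target table name -> serialized rows,
// emulating the TablePerMetric and SingleTable behaviour described above.
func groupForIngestion(metrics []struct{ Name, Row string }, grouping, singleTable string) map[string][]string {
	groups := make(map[string][]string)
	for _, m := range metrics {
		table := singleTable
		if grouping == "TablePerMetric" {
			table = m.Name
		}
		groups[table] = append(groups[table], m.Row)
	}
	return groups
}

func main() {
	metrics := []struct{ Name, Row string }{
		{"cpu", `{"usage":12}`},
		{"mem", `{"free":42}`},
		{"cpu", `{"usage":15}`},
	}
	fmt.Println(groupForIngestion(metrics, "TablePerMetric", "")) // tables "cpu" and "mem"
	fmt.Println(groupForIngestion(metrics, "SingleTable", "telegraf_metrics"))
}
```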
|
||||
|
||||
## Tables Schema
|
||||
|
||||
The schema of the Azure Data Explorer table will match the structure of the
|
||||
Telegraf `Metric` object. The corresponding Azure Data Explorer command
|
||||
generated by the plugin would be like the following:
|
||||
|
||||
```text
|
||||
.create-merge table ['table-name'] (['fields']:dynamic, ['name']:string, ['tags']:dynamic, ['timestamp']:datetime)
|
||||
```
|
||||
|
||||
The corresponding table mapping would be like the following:
|
||||
|
||||
```text
|
||||
.create-or-alter table ['table-name'] ingestion json mapping 'table-name_mapping' '[{"column":"fields", "Properties":{"Path":"$[\'fields\']"}},{"column":"name", "Properties":{"Path":"$[\'name\']"}},{"column":"tags", "Properties":{"Path":"$[\'tags\']"}},{"column":"timestamp", "Properties":{"Path":"$[\'timestamp\']"}}]'
|
||||
```
|
||||
|
||||
**Note**: This plugin will automatically create Azure Data Explorer tables and
|
||||
corresponding table mapping as per the above mentioned commands.
|
||||
|
||||
## Ingestion type
|
||||
|
||||
**Note**: When using the `managed` option,
[streaming ingestion](https://aka.ms/AAhlg6s)
has to be enabled on the ADX cluster.
Refer to the query below to check if streaming ingestion is enabled:
|
||||
|
||||
```kql
|
||||
.show database <DB-Name> policy streamingingestion
|
||||
```
|
||||
|
||||
## Authentication
|
||||
|
||||
### Supported Authentication Methods
|
||||
|
||||
This plugin provides several types of authentication. The plugin will check the
|
||||
existence of several specific environment variables, and consequently will
|
||||
choose the right method.
|
||||
|
||||
These methods are:
|
||||
|
||||
1. AAD Application Tokens (Service Principals with secrets or certificates).
|
||||
|
||||
For guidance on how to create and register an App in Azure Active Directory
|
||||
check [this article][register], and for more information on the Service
|
||||
Principals check [this article][principal].
|
||||
|
||||
2. AAD User Tokens
|
||||
|
||||
- Allows Telegraf to authenticate like a user. This method is mainly used
|
||||
for development purposes only.
|
||||
|
||||
3. Managed Service Identity (MSI) token
|
||||
|
||||
- If you are running Telegraf from Azure VM or infrastructure, then this is
|
||||
the preferred authentication method.
|
||||
|
||||
[register]: https://docs.microsoft.com/en-us/azure/active-directory/develop/quickstart-register-app#register-an-application
|
||||
|
||||
[principal]: https://docs.microsoft.com/en-us/azure/active-directory/develop/app-objects-and-service-principals
|
||||
|
||||
Whichever method is used, the designated principal needs to be assigned the
`Database User` role on the database level in Azure Data Explorer. This role
will allow the plugin to create the required tables and ingest data into them.
If `create_tables=false` then the designated principal only needs the
`Database Ingestor` role.
|
||||
|
||||
### Configurations of the chosen Authentication Method
|
||||
|
||||
The plugin will authenticate using the first available of the following
|
||||
configurations, **it's important to understand that the assessment, and
|
||||
consequently choosing the authentication method, will happen in order as
|
||||
below**:
|
||||
|
||||
1. **Client Credentials**: Azure AD Application ID and Secret.
|
||||
|
||||
Set the following environment variables:
|
||||
|
||||
- `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate.
|
||||
- `AZURE_CLIENT_ID`: Specifies the app client ID to use.
|
||||
- `AZURE_CLIENT_SECRET`: Specifies the app secret to use.
|
||||
|
||||
2. **Client Certificate**: Azure AD Application ID and X.509 Certificate.
|
||||
|
||||
- `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate.
|
||||
- `AZURE_CLIENT_ID`: Specifies the app client ID to use.
|
||||
- `AZURE_CERTIFICATE_PATH`: Specifies the certificate Path to use.
|
||||
- `AZURE_CERTIFICATE_PASSWORD`: Specifies the certificate password to use.
|
||||
|
||||
3. **Resource Owner Password**: Azure AD User and Password. This grant type is
|
||||
*not recommended*, use device login instead if you need interactive login.
|
||||
|
||||
- `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate.
|
||||
- `AZURE_CLIENT_ID`: Specifies the app client ID to use.
|
||||
- `AZURE_USERNAME`: Specifies the username to use.
|
||||
- `AZURE_PASSWORD`: Specifies the password to use.
|
||||
|
||||
4. **Azure Managed Service Identity**: Delegate credential management to the
|
||||
platform. Requires that code is running in Azure, e.g. on a VM. All
|
||||
configuration is handled by Azure. See [Azure Managed Service Identity][msi]
|
||||
for more details. Only available when using the [Azure Resource
|
||||
Manager][arm].
|
||||
|
||||
[msi]: https://docs.microsoft.com/en-us/azure/active-directory/msi-overview
|
||||
[arm]: https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-overview
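
For local experiments it can be convenient to export the first option's
variables programmatically before starting Telegraf; the snippet below is only
an illustration with placeholder values and is not part of the plugin:

```go
package main

import "os"

// Export the client-credential variables checked first in the order above.
// The values are placeholders; real IDs and secrets come from your AAD app.
func main() {
	os.Setenv("AZURE_TENANT_ID", "00000000-0000-0000-0000-000000000000")
	os.Setenv("AZURE_CLIENT_ID", "00000000-0000-0000-0000-000000000000")
	os.Setenv("AZURE_CLIENT_SECRET", "example-secret")
}
```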
|
||||
|
||||
## Querying data collected in Azure Data Explorer
|
||||
|
||||
Examples of data transformations and queries that would be useful to gain
|
||||
insights -
|
||||
|
||||
### Using SQL input plugin
|
||||
|
||||
Sample SQL metrics data -
|
||||
|
||||
name | tags | timestamp | fields
|
||||
-----|------|-----------|-------
|
||||
sqlserver_database_io|{"database_name":"azure-sql-db2","file_type":"DATA","host":"adx-vm","logical_filename":"tempdev","measurement_db_type":"AzureSQLDB","physical_filename":"tempdb.mdf","replica_updateability":"READ_WRITE","sql_instance":"adx-sql-server"}|2021-09-09T13:51:20Z|{"current_size_mb":16,"database_id":2,"file_id":1,"read_bytes":2965504,"read_latency_ms":68,"reads":47,"rg_read_stall_ms":42,"rg_write_stall_ms":0,"space_used_mb":0,"write_bytes":1220608,"write_latency_ms":103,"writes":149}
|
||||
sqlserver_waitstats|{"database_name":"azure-sql-db2","host":"adx-vm","measurement_db_type":"AzureSQLDB","replica_updateability":"READ_WRITE","sql_instance":"adx-sql-server","wait_category":"Worker Thread","wait_type":"THREADPOOL"}|2021-09-09T13:51:20Z|{"max_wait_time_ms":15,"resource_wait_ms":4469,"signal_wait_time_ms":0,"wait_time_ms":4469,"waiting_tasks_count":1464}
|
||||
|
||||
Since the collected metrics object is of a complex type, "fields" and "tags"
are stored as dynamic data types. There are multiple ways to query this data:
|
||||
|
||||
1. Query JSON attributes directly: Azure Data Explorer provides an ability to
|
||||
query JSON data in raw format without parsing it, so JSON attributes can be
|
||||
queried directly in following way:
|
||||
|
||||
```text
|
||||
Tablename
|
||||
| where name == "sqlserver_azure_db_resource_stats" and todouble(fields.avg_cpu_percent) > 7
|
||||
```
|
||||
|
||||
```text
|
||||
Tablename
|
||||
| distinct tostring(tags.database_name)
|
||||
```
|
||||
|
||||
**Note** - This approach could have a performance impact with large volumes
of data; use the approach mentioned below for such cases.
|
||||
|
||||
1. Use an [Update
policy](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/management/updatepolicy):
|
||||
Transform dynamic data type columns using update policy. This is the
|
||||
recommended performant way for querying over large volumes of data compared
|
||||
to querying directly over JSON attributes:
|
||||
|
||||
```kql
|
||||
// Function to transform data
|
||||
.create-or-alter function Transform_TargetTableName() {
|
||||
SourceTableName
|
||||
| mv-apply fields on (extend key = tostring(bag_keys(fields)[0]))
|
||||
| project fieldname=key, value=todouble(fields[key]), name, tags, timestamp
|
||||
}
|
||||
|
||||
// Create destination table with above query's results schema (if it doesn't exist already)
|
||||
.set-or-append TargetTableName <| Transform_TargetTableName() | limit 0
|
||||
|
||||
// Apply update policy on destination table
|
||||
.alter table TargetTableName policy update
|
||||
@'[{"IsEnabled": true, "Source": "SourceTableName", "Query": "Transform_TargetTableName()", "IsTransactional": true, "PropagateIngestionProperties": false}]'
|
||||
```
|
||||
|
||||
### Using syslog input plugin
|
||||
|
||||
Sample syslog data -
|
||||
|
||||
name | tags | timestamp | fields
|
||||
-----|------|-----------|-------
|
||||
syslog|{"appname":"azsecmond","facility":"user","host":"adx-linux-vm","hostname":"adx-linux-vm","severity":"info"}|2021-09-20T14:36:44Z|{"facility_code":1,"message":" 2021/09/20 14:36:44.890110 Failed to connect to mdsd: dial unix /var/run/mdsd/default_djson.socket: connect: no such file or directory","procid":"2184","severity_code":6,"timestamp":"1632148604890477000","version":1}
|
||||
syslog|{"appname":"CRON","facility":"authpriv","host":"adx-linux-vm","hostname":"adx-linux-vm","severity":"info"}|2021-09-20T14:37:01Z|{"facility_code":10,"message":" pam_unix(cron:session): session opened for user root by (uid=0)","procid":"26446","severity_code":6,"timestamp":"1632148621120781000","version":1}
|
||||
|
||||
There are multiple ways to flatten dynamic columns, using the 'extend' or
'bag_unpack' operators. You can use either of these in the above mentioned
update policy function - 'Transform_TargetTableName()':
|
||||
|
||||
- Use
|
||||
[extend](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/extendoperator)
|
||||
operator - This is the recommended approach compared to 'bag_unpack' as it is
faster and more robust. Even if the schema changes, it will not break queries or
|
||||
dashboards.
|
||||
|
||||
```text
|
||||
Tablename
|
||||
| extend facility_code=toint(fields.facility_code), message=tostring(fields.message), procid= tolong(fields.procid), severity_code=toint(fields.severity_code),
|
||||
SysLogTimestamp=unixtime_nanoseconds_todatetime(tolong(fields.timestamp)), version= todouble(fields.version),
|
||||
appname= tostring(tags.appname), facility= tostring(tags.facility),host= tostring(tags.host), hostname=tostring(tags.hostname), severity=tostring(tags.severity)
|
||||
| project-away fields, tags
|
||||
```
|
||||
|
||||
- Use [bag_unpack
|
||||
plugin](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/bag-unpackplugin)
|
||||
to unpack the dynamic type columns automatically. This method could lead to
issues if the source schema changes, as it dynamically expands columns.
|
||||
|
||||
```text
|
||||
Tablename
|
||||
| evaluate bag_unpack(tags, columnsConflict='replace_source')
|
||||
| evaluate bag_unpack(fields, columnsConflict='replace_source')
|
||||
```
|
118
plugins/outputs/azure_data_explorer/azure_data_explorer.go
Normal file
|
@ -0,0 +1,118 @@
|
|||
//go:generate ../../../tools/readme_config_includer/generator
|
||||
package azure_data_explorer
|
||||
|
||||
import (
|
||||
_ "embed"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-kusto-go/kusto/ingest"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/config"
|
||||
common_adx "github.com/influxdata/telegraf/plugins/common/adx"
|
||||
"github.com/influxdata/telegraf/plugins/outputs"
|
||||
"github.com/influxdata/telegraf/plugins/serializers/json"
|
||||
)
|
||||
|
||||
//go:embed sample.conf
|
||||
var sampleConfig string
|
||||
|
||||
type AzureDataExplorer struct {
|
||||
Log telegraf.Logger `toml:"-"`
|
||||
common_adx.Config
|
||||
|
||||
serializer telegraf.Serializer
|
||||
client *common_adx.Client
|
||||
}
|
||||
|
||||
func (*AzureDataExplorer) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
// Initialize the client and the ingestor
|
||||
func (adx *AzureDataExplorer) Init() error {
|
||||
serializer := &json.Serializer{
|
||||
TimestampUnits: config.Duration(time.Nanosecond),
|
||||
TimestampFormat: time.RFC3339Nano,
|
||||
}
|
||||
if err := serializer.Init(); err != nil {
|
||||
return err
|
||||
}
|
||||
adx.serializer = serializer
|
||||
return nil
|
||||
}
|
||||
|
||||
func (adx *AzureDataExplorer) Connect() error {
|
||||
var err error
|
||||
if adx.client, err = adx.Config.NewClient("Kusto.Telegraf", adx.Log); err != nil {
|
||||
return fmt.Errorf("creating new client failed: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Clean up and close the ingestor
|
||||
func (adx *AzureDataExplorer) Close() error {
|
||||
return adx.client.Close()
|
||||
}
|
||||
|
||||
func (adx *AzureDataExplorer) Write(metrics []telegraf.Metric) error {
|
||||
if adx.MetricsGrouping == common_adx.TablePerMetric {
|
||||
return adx.writeTablePerMetric(metrics)
|
||||
}
|
||||
return adx.writeSingleTable(metrics)
|
||||
}
|
||||
|
||||
func (adx *AzureDataExplorer) writeTablePerMetric(metrics []telegraf.Metric) error {
|
||||
tableMetricGroups := make(map[string][]byte)
|
||||
// Group metrics by name and serialize them
|
||||
for _, m := range metrics {
|
||||
tableName := m.Name()
|
||||
metricInBytes, err := adx.serializer.Serialize(m)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if existingBytes, ok := tableMetricGroups[tableName]; ok {
|
||||
tableMetricGroups[tableName] = append(existingBytes, metricInBytes...)
|
||||
} else {
|
||||
tableMetricGroups[tableName] = metricInBytes
|
||||
}
|
||||
}
|
||||
|
||||
// Push the metrics for each table
|
||||
format := ingest.FileFormat(ingest.JSON)
|
||||
for tableName, tableMetrics := range tableMetricGroups {
|
||||
if err := adx.client.PushMetrics(format, tableName, tableMetrics); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (adx *AzureDataExplorer) writeSingleTable(metrics []telegraf.Metric) error {
|
||||
// serialize each metric and store the result in a single []byte buffer
|
||||
metricsArray := make([]byte, 0)
|
||||
for _, m := range metrics {
|
||||
metricsInBytes, err := adx.serializer.Serialize(m)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
metricsArray = append(metricsArray, metricsInBytes...)
|
||||
}
|
||||
|
||||
// push metrics to a single table
|
||||
format := ingest.FileFormat(ingest.JSON)
|
||||
err := adx.client.PushMetrics(format, adx.TableName, metricsArray)
|
||||
return err
|
||||
}
|
||||
|
||||
func init() {
|
||||
outputs.Add("azure_data_explorer", func() telegraf.Output {
|
||||
return &AzureDataExplorer{
|
||||
Config: common_adx.Config{
|
||||
CreateTables: true,
|
||||
Timeout: config.Duration(20 * time.Second)},
|
||||
}
|
||||
})
|
||||
}
|
|
@ -0,0 +1,30 @@
|
|||
package azure_data_explorer
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
common_adx "github.com/influxdata/telegraf/plugins/common/adx"
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
)
|
||||
|
||||
func TestInit(t *testing.T) {
|
||||
plugin := AzureDataExplorer{
|
||||
Log: testutil.Logger{},
|
||||
client: &common_adx.Client{},
|
||||
Config: common_adx.Config{
|
||||
Endpoint: "someendpoint",
|
||||
},
|
||||
}
|
||||
|
||||
err := plugin.Init()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestConnectBlankEndpointData(t *testing.T) {
|
||||
plugin := AzureDataExplorer{
|
||||
Log: testutil.Logger{},
|
||||
}
|
||||
require.ErrorContains(t, plugin.Connect(), "endpoint configuration cannot be empty")
|
||||
}
|
31
plugins/outputs/azure_data_explorer/sample.conf
Normal file
|
@ -0,0 +1,31 @@
|
|||
# Sends metrics to Azure Data Explorer
|
||||
[[outputs.azure_data_explorer]]
|
||||
## The URI property of the Azure Data Explorer resource on Azure
|
||||
## ex: endpoint_url = https://myadxresource.australiasoutheast.kusto.windows.net
|
||||
endpoint_url = ""
|
||||
|
||||
## The Azure Data Explorer database that the metrics will be ingested into.
|
||||
## The plugin will NOT generate this database automatically, it's expected that this database already exists before ingestion.
|
||||
## ex: "exampledatabase"
|
||||
database = ""
|
||||
|
||||
## Timeout for Azure Data Explorer operations
|
||||
# timeout = "20s"
|
||||
|
||||
## Type of metrics grouping used when pushing to Azure Data Explorer.
|
||||
## Default is "TablePerMetric" for one table per different metric.
|
||||
## For more information, please check the plugin README.
|
||||
# metrics_grouping_type = "TablePerMetric"
|
||||
|
||||
## Name of the single table to store all the metrics (Only needed if metrics_grouping_type is "SingleTable").
|
||||
# table_name = ""
|
||||
|
||||
## Creates tables and relevant mapping if set to true (default).
|
||||
## Skips table and mapping creation if set to false. This is useful for running Telegraf with the lowest possible permissions, i.e. the table ingestor role.
|
||||
# create_tables = true
|
||||
|
||||
## Ingestion method to use.
|
||||
## Available options are
|
||||
## - managed -- streaming ingestion with fallback to batched ingestion or the "queued" method below
|
||||
## - queued -- queue up metrics data and process sequentially
|
||||
# ingestion_type = "queued"
|
206
plugins/outputs/azure_monitor/README.md
Normal file
|
@ -0,0 +1,206 @@
|
|||
# Azure Monitor Output Plugin
|
||||
|
||||
This plugin writes metrics to [Azure Monitor][azure_monitor] which has
|
||||
a metric resolution of one minute. To accommodate this in Telegraf, the
|
||||
plugin will automatically aggregate metrics into one minute buckets and send
|
||||
them to the service on every flush interval.
|
||||
|
||||
> [!IMPORTANT]
|
||||
> The Azure Monitor custom metrics service is currently in preview and might
|
||||
> not be available in all Azure regions.
|
||||
> Please also take the [metric time limitations](#metric-time-limitations) into
|
||||
> account!
|
||||
|
||||
The metrics from each input plugin will be written to a separate Azure Monitor
|
||||
namespace, prefixed with `Telegraf/` by default. The field name for each metric
|
||||
is written as the Azure Monitor metric name. All field values are written as a
|
||||
summarized set that includes: min, max, sum, count. Tags are written as a
|
||||
dimension on each Azure Monitor metric.
|
||||
|
||||
⭐ Telegraf v1.8.0
|
||||
🏷️ cloud, datastore
|
||||
💻 all
|
||||
|
||||
[azure_monitor]: https://learn.microsoft.com/en-us/azure/azure-monitor
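
As a rough illustration of this mapping, the following standalone Go sketch (not
part of the plugin; metric name, field name and values are made up) computes the
aggregate that would be sent for a `cpu` metric with a `usage_idle` field:

```go
package main

import "fmt"

// Sketch of the aggregation described above: a Telegraf metric "cpu" with
// the field "usage_idle" ends up as the Azure Monitor metric "usage_idle"
// in the "Telegraf/cpu" namespace, summarized per one-minute bucket as
// min, max, sum and count.
func main() {
	samples := []float64{97.2, 98.5, 96.9} // hypothetical values within one minute

	minV, maxV, sum := samples[0], samples[0], 0.0
	for _, v := range samples {
		if v < minV {
			minV = v
		}
		if v > maxV {
			maxV = v
		}
		sum += v
	}

	fmt.Printf("namespace=Telegraf/cpu metric=usage_idle min=%.1f max=%.1f sum=%.1f count=%d\n",
		minV, maxV, sum, len(samples))
}
```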
|
||||
|
||||
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
|
||||
|
||||
In addition to the plugin-specific configuration settings, plugins support
|
||||
additional global and plugin configuration settings. These settings are used to
|
||||
modify metrics, tags, and fields or create aliases and configure ordering, etc.
|
||||
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
|
||||
|
||||
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
|
||||
|
||||
## Configuration
|
||||
|
||||
```toml @sample.conf
|
||||
# Send aggregate metrics to Azure Monitor
|
||||
[[outputs.azure_monitor]]
|
||||
## Timeout for HTTP writes.
|
||||
# timeout = "20s"
|
||||
|
||||
## Set the namespace prefix, defaults to "Telegraf/<input-name>".
|
||||
# namespace_prefix = "Telegraf/"
|
||||
|
||||
## Azure Monitor doesn't have a string value type, so convert string
|
||||
## fields to dimensions (a.k.a. tags) if enabled. Azure Monitor allows
|
||||
## a maximum of 10 dimensions so Telegraf will only send the first 10
|
||||
## alphanumeric dimensions.
|
||||
# strings_as_dimensions = false
|
||||
|
||||
## Both region and resource_id must be set or be available via the
|
||||
## Instance Metadata service on Azure Virtual Machines.
|
||||
#
|
||||
## Azure Region to publish metrics against.
|
||||
## ex: region = "southcentralus"
|
||||
# region = ""
|
||||
#
|
||||
## The Azure Resource ID against which metric will be logged, e.g.
|
||||
## ex: resource_id = "/subscriptions/<subscription_id>/resourceGroups/<resource_group>/providers/Microsoft.Compute/virtualMachines/<vm_name>"
|
||||
# resource_id = ""
|
||||
|
||||
## Optionally, if in Azure US Government, China, or other sovereign
|
||||
## cloud environment, set the appropriate REST endpoint for receiving
|
||||
## metrics. (Note: region may be unused in this context)
|
||||
# endpoint_url = "https://monitoring.core.usgovcloudapi.net"
|
||||
|
||||
## Time limitations of metric to send
|
||||
## Documentation can be found here:
|
||||
## https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/metrics-store-custom-rest-api?tabs=rest#timestamp
|
||||
## However, the returned (400) error message might document stricter or more
|
||||
## relaxed settings. By default, only past metrics within the limit are sent.
|
||||
# timestamp_limit_past = "30m"
|
||||
# timestamp_limit_future = "-1m"
|
||||
```
|
||||
|
||||
## Setup
|
||||
|
||||
1. [Register the `microsoft.insights` resource provider in your Azure
|
||||
subscription][resource provider].
|
||||
1. If using Managed Service Identities to authenticate an Azure VM, [enable
|
||||
system-assigned managed identity][enable msi].
|
||||
1. Use a region that supports Azure Monitor Custom Metrics. For regions with
|
||||
Custom Metrics support, an endpoint will be available with the format
|
||||
`https://<region>.monitoring.azure.com`.
|
||||
|
||||
[resource provider]: https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-manager-supported-services
|
||||
|
||||
[enable msi]: https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/qs-configure-portal-windows-vm
|
||||
|
||||
### Region and Resource ID
|
||||
|
||||
The plugin will attempt to discover the region and resource ID using the Azure
|
||||
VM Instance Metadata service. If Telegraf is not running on a virtual machine or
|
||||
the VM Instance Metadata service is not available, the following variables are
|
||||
required for the output to function.
|
||||
|
||||
* region
|
||||
* resource_id
|
||||
|
||||
### Authentication
|
||||
|
||||
This plugin supports several different authentication methods. The
|
||||
preferred authentication methods are different from the *order* in which each
|
||||
authentication is checked. Here are the preferred authentication methods:
|
||||
|
||||
1. Managed Service Identity (MSI) token: This is the preferred authentication
|
||||
method. Telegraf will automatically authenticate using this method when
|
||||
running on Azure VMs.
|
||||
2. AAD Application Tokens (Service Principals)
|
||||
|
||||
* Primarily useful if Telegraf is writing metrics for other resources.
|
||||
[More information][principal].
|
||||
* A Service Principal or User Principal needs to be assigned the `Monitoring
|
||||
Metrics Publisher` role on the resource(s) metrics will be emitted
|
||||
against.
|
||||
|
||||
3. AAD User Tokens (User Principals)
|
||||
|
||||
* Allows Telegraf to authenticate like a user. It is best to use this method
|
||||
for development.
|
||||
|
||||
[principal]: https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-application-objects
|
||||
|
||||
The plugin will authenticate using the first available of the following
|
||||
configurations:
|
||||
|
||||
1. **Client Credentials**: Azure AD Application ID and Secret. Set the following
|
||||
environment variables:
|
||||
|
||||
* `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate.
|
||||
* `AZURE_CLIENT_ID`: Specifies the app client ID to use.
|
||||
* `AZURE_CLIENT_SECRET`: Specifies the app secret to use.
|
||||
|
||||
1. **Client Certificate**: Azure AD Application ID and X.509 Certificate.
|
||||
|
||||
* `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate.
|
||||
* `AZURE_CLIENT_ID`: Specifies the app client ID to use.
|
||||
* `AZURE_CERTIFICATE_PATH`: Specifies the certificate Path to use.
|
||||
* `AZURE_CERTIFICATE_PASSWORD`: Specifies the certificate password to use.
|
||||
|
||||
1. **Resource Owner Password**: Azure AD User and Password. This grant type is
|
||||
*not recommended*, use device login instead if you need interactive login.
|
||||
|
||||
* `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate.
|
||||
* `AZURE_CLIENT_ID`: Specifies the app client ID to use.
|
||||
* `AZURE_USERNAME`: Specifies the username to use.
|
||||
* `AZURE_PASSWORD`: Specifies the password to use.
|
||||
|
||||
1. **Azure Managed Service Identity**: Delegate credential management to the
|
||||
platform. Requires that code is running in Azure, e.g. on a VM. All
|
||||
configuration is handled by Azure. See [Azure Managed Service Identity][msi]
|
||||
for more details. Only available when using the [Azure Resource
|
||||
Manager][arm].
|
||||
|
||||
[msi]: https://docs.microsoft.com/en-us/azure/active-directory/msi-overview
|
||||
[arm]: https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-overview
|
||||
|
||||
> [!NOTE]
|
||||
> As shown above, the last option (#4) is the preferred way to authenticate
|
||||
> when running Telegraf on Azure VMs.
|
||||
|
||||
## Dimensions
|
||||
|
||||
Azure Monitor only accepts values with a numeric type. The plugin will drop
|
||||
fields with a string type by default. The plugin can set all string type fields
|
||||
as extra dimensions in the Azure Monitor custom metric by setting the
|
||||
configuration option `strings_as_dimensions` to `true`.
|
||||
|
||||
Keep in mind, Azure Monitor allows a maximum of 10 dimensions per metric. The
|
||||
plugin will deterministically drop any dimensions that exceed the 10
|
||||
dimension limit.
|
||||
|
||||
To convert only a subset of string-typed fields as dimensions, enable
|
||||
`strings_as_dimensions` and use the [`fieldinclude` or `fieldexclude`
|
||||
modifiers][conf-modifiers] to limit the string-typed fields that are sent to
|
||||
the plugin.
|
||||
|
||||
[conf-modifiers]: ../../../docs/CONFIGURATION.md#modifiers
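
A minimal sketch of this behaviour (plain Go, hypothetical field names, not
plugin code): string-typed fields move into the dimension set, and at most 10
dimensions, sorted by key, are kept:

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	fields := map[string]interface{}{
		"value":   42.0,    // numeric, stays a field
		"message": "howdy", // string, becomes a dimension
	}
	dimensions := map[string]string{"host": "localhost"}

	// Move string fields into the dimensions when strings_as_dimensions is on.
	for k, v := range fields {
		if s, ok := v.(string); ok {
			dimensions[k] = s
			delete(fields, k)
		}
	}

	// Azure Monitor accepts at most 10 dimensions per metric.
	keys := make([]string, 0, len(dimensions))
	for k := range dimensions {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	if len(keys) > 10 {
		keys = keys[:10]
	}

	for _, k := range keys {
		fmt.Printf("dimension %s=%s\n", k, dimensions[k])
	}
	fmt.Println("remaining fields:", fields)
}
```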
|
||||
|
||||
## Metric time limitations
|
||||
|
||||
Azure Monitor won't accept metrics too far in the past or future. Keep this in
|
||||
mind when configuring your output buffer limits or other variables, such as
|
||||
flush intervals, or when using input sources that could cause metrics to be
|
||||
out of this allowed range.
|
||||
|
||||
According to the [documentation][timestamp_docs], the timestamp should not be
|
||||
older than 20 minutes or more than 5 minutes in the future at the time when the
|
||||
metric is sent to the Azure Monitor service. However, HTTP `400` error messages
|
||||
returned by the service might specify other values such as 30 minutes in the
|
||||
past and 4 minutes in the future.
|
||||
|
||||
You can control the timeframe actually sent using the `timestamp_limit_past` and
|
||||
`timestamp_limit_future` settings. By default only metrics between 30 minutes
|
||||
and up to one minute in the past are sent. The lower limit represents the more
|
||||
permissive limit received in the `400` error messages. The upper limit leaves
|
||||
enough time for aggregation to happen by not sending aggregations too early.
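
For example, the default limits translate into the following acceptance window
(a plain Go sketch of the calculation, not plugin code):

```go
package main

import (
	"fmt"
	"time"
)

// With timestamp_limit_past = "30m" and timestamp_limit_future = "-1m"
// the accepted window at any point in time is [now-30m, now-1m].
func main() {
	now := time.Now()
	earliest := now.Add(-30 * time.Minute)
	latest := now.Add(-1 * time.Minute)
	fmt.Printf("accepted metric timestamps: %s .. %s\n",
		earliest.Format(time.RFC3339), latest.Format(time.RFC3339))
}
```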
|
||||
|
||||
> [!IMPORTANT]
|
||||
> When adapting the limit you need to take the limits permitted by the service
|
||||
> as well as latency when sending metrics into account. Furthermore, you should
|
||||
> not send metrics too early as in this case aggregation might not happen and
|
||||
> values are misleading.
|
||||
|
||||
[timestamp_docs]: https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/metrics-store-custom-rest-api?tabs=rest#timestamp
|
597
plugins/outputs/azure_monitor/azure_monitor.go
Normal file
|
@ -0,0 +1,597 @@
|
|||
//go:generate ../../../tools/readme_config_includer/generator
|
||||
package azure_monitor
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
_ "embed"
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"io"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/azure/auth"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/config"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
"github.com/influxdata/telegraf/metric"
|
||||
"github.com/influxdata/telegraf/plugins/outputs"
|
||||
"github.com/influxdata/telegraf/selfstat"
|
||||
)
|
||||
|
||||
//go:embed sample.conf
|
||||
var sampleConfig string
|
||||
|
||||
const (
|
||||
vmInstanceMetadataURL = "http://169.254.169.254/metadata/instance?api-version=2017-12-01"
|
||||
resourceIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachines/%s"
|
||||
resourceIDScaleSetTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachineScaleSets/%s"
|
||||
maxRequestBodySize = 4000000
|
||||
)
|
||||
|
||||
var invalidNameCharRE = regexp.MustCompile(`[^a-zA-Z0-9_]`)
|
||||
|
||||
type dimension struct {
|
||||
name string
|
||||
value string
|
||||
}
|
||||
|
||||
type aggregate struct {
|
||||
name string
|
||||
min float64
|
||||
max float64
|
||||
sum float64
|
||||
count int64
|
||||
dimensions []dimension
|
||||
updated bool
|
||||
}
|
||||
|
||||
type AzureMonitor struct {
|
||||
Timeout config.Duration `toml:"timeout"`
|
||||
NamespacePrefix string `toml:"namespace_prefix"`
|
||||
StringsAsDimensions bool `toml:"strings_as_dimensions"`
|
||||
Region string `toml:"region"`
|
||||
ResourceID string `toml:"resource_id"`
|
||||
EndpointURL string `toml:"endpoint_url"`
|
||||
TimestampLimitPast config.Duration `toml:"timestamp_limit_past"`
|
||||
TimestampLimitFuture config.Duration `toml:"timestamp_limit_future"`
|
||||
Log telegraf.Logger `toml:"-"`
|
||||
|
||||
url string
|
||||
preparer autorest.Preparer
|
||||
client *http.Client
|
||||
|
||||
cache map[time.Time]map[uint64]*aggregate
|
||||
timeFunc func() time.Time
|
||||
|
||||
MetricOutsideWindow selfstat.Stat
|
||||
}
|
||||
|
||||
func (*AzureMonitor) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (a *AzureMonitor) Init() error {
|
||||
a.cache = make(map[time.Time]map[uint64]*aggregate, 36)
|
||||
|
||||
authorizer, err := auth.NewAuthorizerFromEnvironmentWithResource("https://monitoring.azure.com/")
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating authorizer failed: %w", err)
|
||||
}
|
||||
a.preparer = autorest.CreatePreparer(authorizer.WithAuthorization())
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *AzureMonitor) Connect() error {
|
||||
a.client = &http.Client{
|
||||
Transport: &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
},
|
||||
Timeout: time.Duration(a.Timeout),
|
||||
}
|
||||
|
||||
// If information is missing try to retrieve it from the Azure VM instance
|
||||
if a.Region == "" || a.ResourceID == "" {
|
||||
region, resourceID, err := vmInstanceMetadata(a.client)
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting VM metadata failed: %w", err)
|
||||
}
|
||||
|
||||
if a.Region == "" {
|
||||
a.Region = region
|
||||
}
|
||||
|
||||
if a.ResourceID == "" {
|
||||
a.ResourceID = resourceID
|
||||
}
|
||||
}
|
||||
|
||||
if a.ResourceID == "" {
|
||||
return errors.New("no resource ID configured or available via VM instance metadata")
|
||||
}
|
||||
|
||||
if a.EndpointURL == "" {
|
||||
if a.Region == "" {
|
||||
return errors.New("no region configured or available via VM instance metadata")
|
||||
}
|
||||
a.url = fmt.Sprintf("https://%s.monitoring.azure.com%s/metrics", a.Region, a.ResourceID)
|
||||
} else {
|
||||
a.url = a.EndpointURL + a.ResourceID + "/metrics"
|
||||
}
|
||||
a.Log.Debugf("Writing to Azure Monitor URL: %s", a.url)
|
||||
|
||||
a.MetricOutsideWindow = selfstat.Register(
|
||||
"azure_monitor",
|
||||
"metric_outside_window",
|
||||
map[string]string{
|
||||
"region": a.Region,
|
||||
"resource_id": a.ResourceID,
|
||||
},
|
||||
)
|
||||
|
||||
a.Reset()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close shuts down any active connections
|
||||
func (a *AzureMonitor) Close() error {
|
||||
a.client.CloseIdleConnections()
|
||||
a.client = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add will append a metric to the output aggregate
|
||||
func (a *AzureMonitor) Add(m telegraf.Metric) {
|
||||
// Azure Monitor only supports aggregates 30 minutes into the past and 4
|
||||
// minutes into the future. Future metrics are dropped when pushed.
|
||||
tbucket := m.Time().Truncate(time.Minute)
|
||||
if tbucket.Before(a.timeFunc().Add(-time.Duration(a.TimestampLimitPast))) {
|
||||
a.MetricOutsideWindow.Incr(1)
|
||||
return
|
||||
}
|
||||
|
||||
// Azure Monitor doesn't have a string value type, so convert string fields
|
||||
// to dimensions (a.k.a. tags) if enabled.
|
||||
if a.StringsAsDimensions {
|
||||
for _, f := range m.FieldList() {
|
||||
if v, ok := f.Value.(string); ok {
|
||||
m.AddTag(f.Key, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, f := range m.FieldList() {
|
||||
fv, err := internal.ToFloat64(f.Value)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Azure Monitor does not support fields so the field name is appended
|
||||
// to the metric name.
|
||||
sanitizeKey := invalidNameCharRE.ReplaceAllString(f.Key, "_")
|
||||
name := m.Name() + "-" + sanitizeKey
|
||||
id := hashIDWithField(m.HashID(), f.Key)
|
||||
|
||||
// Create the time bucket if it doesn't exist
|
||||
if _, ok := a.cache[tbucket]; !ok {
|
||||
a.cache[tbucket] = make(map[uint64]*aggregate)
|
||||
}
|
||||
|
||||
// Fetch existing aggregate
|
||||
agg, ok := a.cache[tbucket][id]
|
||||
if !ok {
|
||||
dimensions := make([]dimension, 0, len(m.TagList()))
|
||||
for _, tag := range m.TagList() {
|
||||
dimensions = append(dimensions, dimension{
|
||||
name: tag.Key,
|
||||
value: tag.Value,
|
||||
})
|
||||
}
|
||||
a.cache[tbucket][id] = &aggregate{
|
||||
name: name,
|
||||
dimensions: dimensions,
|
||||
min: fv,
|
||||
max: fv,
|
||||
sum: fv,
|
||||
count: 1,
|
||||
updated: true,
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if fv < agg.min {
|
||||
agg.min = fv
|
||||
}
|
||||
if fv > agg.max {
|
||||
agg.max = fv
|
||||
}
|
||||
agg.sum += fv
|
||||
agg.count++
|
||||
agg.updated = true
|
||||
}
|
||||
}
|
||||
|
||||
// Push sends metrics to the output metric buffer
|
||||
func (a *AzureMonitor) Push() []telegraf.Metric {
|
||||
var metrics []telegraf.Metric
|
||||
for tbucket, aggs := range a.cache {
|
||||
// Do not send metrics early
|
||||
if tbucket.After(a.timeFunc().Add(time.Duration(a.TimestampLimitFuture))) {
|
||||
continue
|
||||
}
|
||||
for _, agg := range aggs {
|
||||
// Only send aggregates that have had an update since the last push.
|
||||
if !agg.updated {
|
||||
continue
|
||||
}
|
||||
|
||||
tags := make(map[string]string, len(agg.dimensions))
|
||||
for _, tag := range agg.dimensions {
|
||||
tags[tag.name] = tag.value
|
||||
}
|
||||
|
||||
m := metric.New(agg.name,
|
||||
tags,
|
||||
map[string]interface{}{
|
||||
"min": agg.min,
|
||||
"max": agg.max,
|
||||
"sum": agg.sum,
|
||||
"count": agg.count,
|
||||
},
|
||||
tbucket,
|
||||
)
|
||||
|
||||
metrics = append(metrics, m)
|
||||
}
|
||||
}
|
||||
return metrics
|
||||
}
|
||||
|
||||
// Reset clears the cache of aggregate metrics
|
||||
func (a *AzureMonitor) Reset() {
|
||||
for tbucket := range a.cache {
|
||||
// Remove aggregates older than 30 minutes
|
||||
if tbucket.Before(a.timeFunc().Add(-time.Duration(a.TimestampLimitPast))) {
|
||||
delete(a.cache, tbucket)
|
||||
continue
|
||||
}
|
||||
// Metrics updated within the latest 1m have not been pushed and should
|
||||
// not be cleared.
|
||||
if tbucket.After(a.timeFunc().Add(time.Duration(a.TimestampLimitFuture))) {
|
||||
continue
|
||||
}
|
||||
for id := range a.cache[tbucket] {
|
||||
a.cache[tbucket][id].updated = false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Write writes metrics to the remote endpoint
|
||||
func (a *AzureMonitor) Write(metrics []telegraf.Metric) error {
|
||||
now := a.timeFunc()
|
||||
tsEarliest := now.Add(-time.Duration(a.TimestampLimitPast))
|
||||
tsLatest := now.Add(time.Duration(a.TimestampLimitFuture))
|
||||
|
||||
writeErr := &internal.PartialWriteError{
|
||||
MetricsAccept: make([]int, 0, len(metrics)),
|
||||
}
|
||||
azmetrics := make(map[uint64]*azureMonitorMetric, len(metrics))
|
||||
for i, m := range metrics {
|
||||
// Skip metrics that are outside of the valid timespan
|
||||
if m.Time().Before(tsEarliest) || m.Time().After(tsLatest) {
|
||||
a.Log.Tracef("Metric outside acceptable time window: %v", m)
|
||||
a.MetricOutsideWindow.Incr(1)
|
||||
writeErr.Err = errors.New("metric(s) outside of acceptable time window")
|
||||
writeErr.MetricsReject = append(writeErr.MetricsReject, i)
|
||||
continue
|
||||
}
|
||||
|
||||
amm, err := translate(m, a.NamespacePrefix)
|
||||
if err != nil {
|
||||
a.Log.Errorf("Could not create azure metric for %q; discarding point", m.Name())
|
||||
if writeErr.Err == nil {
|
||||
writeErr.Err = errors.New("translating metric(s) failed")
|
||||
}
|
||||
writeErr.MetricsReject = append(writeErr.MetricsReject, i)
|
||||
continue
|
||||
}
|
||||
|
||||
id := hashIDWithTagKeysOnly(m)
|
||||
if azm, ok := azmetrics[id]; !ok {
|
||||
azmetrics[id] = amm
|
||||
azmetrics[id].index = i
|
||||
} else {
|
||||
azmetrics[id].Data.BaseData.Series = append(
|
||||
azm.Data.BaseData.Series,
|
||||
amm.Data.BaseData.Series...,
|
||||
)
|
||||
azmetrics[id].index = i
|
||||
}
|
||||
}
|
||||
|
||||
if len(azmetrics) == 0 {
|
||||
if writeErr.Err == nil {
|
||||
return nil
|
||||
}
|
||||
return writeErr
|
||||
}
|
||||
|
||||
var buffer bytes.Buffer
|
||||
buffer.Grow(maxRequestBodySize)
|
||||
batchIndices := make([]int, 0, len(azmetrics))
|
||||
for _, m := range azmetrics {
|
||||
// Azure Monitor accepts new batches of points in new-line delimited
|
||||
// JSON, following RFC 4288 (see https://github.com/ndjson/ndjson-spec).
|
||||
buf, err := json.Marshal(m)
|
||||
if err != nil {
|
||||
writeErr.MetricsReject = append(writeErr.MetricsReject, m.index)
|
||||
writeErr.Err = err
|
||||
continue
|
||||
}
|
||||
batchIndices = append(batchIndices, m.index)
|
||||
|
||||
// Azure Monitor has a maximum request body size of 4MB. Send batches that
|
||||
// exceed this size via separate write requests.
|
||||
if buffer.Len()+len(buf)+1 > maxRequestBodySize {
|
||||
if retryable, err := a.send(buffer.Bytes()); err != nil {
|
||||
writeErr.Err = err
|
||||
if !retryable {
|
||||
writeErr.MetricsReject = append(writeErr.MetricsReject, batchIndices...)
|
||||
}
|
||||
return writeErr
|
||||
}
|
||||
writeErr.MetricsAccept = append(writeErr.MetricsAccept, batchIndices...)
|
||||
batchIndices = make([]int, 0, len(azmetrics))
|
||||
buffer.Reset()
|
||||
}
|
||||
if _, err := buffer.Write(buf); err != nil {
|
||||
return fmt.Errorf("writing to buffer failed: %w", err)
|
||||
}
|
||||
if err := buffer.WriteByte('\n'); err != nil {
|
||||
return fmt.Errorf("writing to buffer failed: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if retryable, err := a.send(buffer.Bytes()); err != nil {
|
||||
writeErr.Err = err
|
||||
if !retryable {
|
||||
writeErr.MetricsReject = append(writeErr.MetricsReject, batchIndices...)
|
||||
}
|
||||
return writeErr
|
||||
}
|
||||
writeErr.MetricsAccept = append(writeErr.MetricsAccept, batchIndices...)
|
||||
|
||||
if writeErr.Err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return writeErr
|
||||
}
|
||||
|
||||
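// send compresses the request body with gzip and posts it to the Azure Monitor endpoint; the returned boolean indicates whether the request may be retried.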
func (a *AzureMonitor) send(body []byte) (bool, error) {
|
||||
var buf bytes.Buffer
|
||||
g := gzip.NewWriter(&buf)
|
||||
if _, err := g.Write(body); err != nil {
|
||||
return false, fmt.Errorf("zipping content failed: %w", err)
|
||||
}
|
||||
if err := g.Close(); err != nil {
|
||||
return false, fmt.Errorf("closing gzip writer failed: %w", err)
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("POST", a.url, &buf)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("creating request failed: %w", err)
|
||||
}
|
||||
|
||||
req.Header.Set("Content-Encoding", "gzip")
|
||||
req.Header.Set("Content-Type", "application/x-ndjson")
|
||||
|
||||
// Add the authorization header. WithAuthorization will automatically
|
||||
// refresh the token if needed.
|
||||
req, err = a.preparer.Prepare(req)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("unable to fetch authentication credentials: %w", err)
|
||||
}
|
||||
|
||||
resp, err := a.client.Do(req)
|
||||
if err != nil {
|
||||
if errors.Is(err, context.DeadlineExceeded) {
|
||||
a.client.CloseIdleConnections()
|
||||
a.client = &http.Client{
|
||||
Transport: &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
},
|
||||
Timeout: time.Duration(a.Timeout),
|
||||
}
|
||||
}
|
||||
return true, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode >= 200 && resp.StatusCode <= 299 {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
retryable := resp.StatusCode != 400
|
||||
if respbody, err := io.ReadAll(resp.Body); err == nil {
|
||||
return retryable, fmt.Errorf("failed to write batch: [%d] %s: %s", resp.StatusCode, resp.Status, string(respbody))
|
||||
}
|
||||
|
||||
return retryable, fmt.Errorf("failed to write batch: [%d] %s", resp.StatusCode, resp.Status)
|
||||
}
|
||||
|
||||
// vmInstanceMetadata retrieves metadata about the current Azure VM
|
||||
func vmInstanceMetadata(c *http.Client) (region, resourceID string, err error) {
|
||||
req, err := http.NewRequest("GET", vmInstanceMetadataURL, nil)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("error creating request: %w", err)
|
||||
}
|
||||
req.Header.Set("Metadata", "true")
|
||||
|
||||
resp, err := c.Do(req)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
if resp.StatusCode >= 300 || resp.StatusCode < 200 {
|
||||
return "", "", fmt.Errorf("unable to fetch instance metadata: [%s] %d",
|
||||
vmInstanceMetadataURL, resp.StatusCode)
|
||||
}
|
||||
|
||||
var metadata virtualMachineMetadata
|
||||
if err := json.Unmarshal(body, &metadata); err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
region = metadata.Compute.Location
|
||||
resourceID = metadata.ResourceID()
|
||||
|
||||
return region, resourceID, nil
|
||||
}
|
||||
|
||||
func hashIDWithField(id uint64, fk string) uint64 {
|
||||
h := fnv.New64a()
|
||||
b := make([]byte, binary.MaxVarintLen64)
|
||||
n := binary.PutUvarint(b, id)
|
||||
h.Write(b[:n])
|
||||
h.Write([]byte("\n"))
|
||||
h.Write([]byte(fk))
|
||||
h.Write([]byte("\n"))
|
||||
return h.Sum64()
|
||||
}
|
||||
|
||||
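// hashIDWithTagKeysOnly groups series by metric name, tag keys (not values) and timestamp so matching entries can be merged into one request item.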
func hashIDWithTagKeysOnly(m telegraf.Metric) uint64 {
|
||||
h := fnv.New64a()
|
||||
h.Write([]byte(m.Name()))
|
||||
h.Write([]byte("\n"))
|
||||
for _, tag := range m.TagList() {
|
||||
if tag.Key == "" || tag.Value == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
h.Write([]byte(tag.Key))
|
||||
h.Write([]byte("\n"))
|
||||
}
|
||||
b := make([]byte, binary.MaxVarintLen64)
|
||||
n := binary.PutUvarint(b, uint64(m.Time().UnixNano()))
|
||||
h.Write(b[:n])
|
||||
h.Write([]byte("\n"))
|
||||
return h.Sum64()
|
||||
}
|
||||
|
||||
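// translate converts an aggregated metric (with min, max, sum and count fields) into the Azure Monitor custom metric format, splitting the metric name into namespace and metric at the first hyphen.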
func translate(m telegraf.Metric, prefix string) (*azureMonitorMetric, error) {
|
||||
dimensionNames := make([]string, 0, len(m.TagList()))
|
||||
dimensionValues := make([]string, 0, len(m.TagList()))
|
||||
for _, tag := range m.TagList() {
|
||||
// Azure custom metrics service supports up to 10 dimensions
|
||||
if len(dimensionNames) >= 10 {
|
||||
continue
|
||||
}
|
||||
|
||||
if tag.Key == "" || tag.Value == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
dimensionNames = append(dimensionNames, tag.Key)
|
||||
dimensionValues = append(dimensionValues, tag.Value)
|
||||
}
|
||||
|
||||
vmin, err := getFloatField(m, "min")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
vmax, err := getFloatField(m, "max")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
vsum, err := getFloatField(m, "sum")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
vcount, err := getIntField(m, "count")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mn, ns := "Missing", "Missing"
|
||||
names := strings.SplitN(m.Name(), "-", 2)
|
||||
if len(names) > 1 {
|
||||
mn = names[1]
|
||||
}
|
||||
if len(names) > 0 {
|
||||
ns = names[0]
|
||||
}
|
||||
ns = prefix + ns
|
||||
|
||||
return &azureMonitorMetric{
|
||||
Time: m.Time(),
|
||||
Data: &azureMonitorData{
|
||||
BaseData: &azureMonitorBaseData{
|
||||
Metric: mn,
|
||||
Namespace: ns,
|
||||
DimensionNames: dimensionNames,
|
||||
Series: []*azureMonitorSeries{
|
||||
{
|
||||
DimensionValues: dimensionValues,
|
||||
Min: vmin,
|
||||
Max: vmax,
|
||||
Sum: vsum,
|
||||
Count: vcount,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func getFloatField(m telegraf.Metric, key string) (float64, error) {
|
||||
fv, ok := m.GetField(key)
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("missing field: %s", key)
|
||||
}
|
||||
|
||||
if value, ok := fv.(float64); ok {
|
||||
return value, nil
|
||||
}
|
||||
return 0, fmt.Errorf("unexpected type: %s: %T", key, fv)
|
||||
}
|
||||
|
||||
func getIntField(m telegraf.Metric, key string) (int64, error) {
|
||||
fv, ok := m.GetField(key)
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("missing field: %s", key)
|
||||
}
|
||||
|
||||
if value, ok := fv.(int64); ok {
|
||||
return value, nil
|
||||
}
|
||||
return 0, fmt.Errorf("unexpected type: %s: %T", key, fv)
|
||||
}
|
||||
|
||||
func init() {
|
||||
outputs.Add("azure_monitor", func() telegraf.Output {
|
||||
return &AzureMonitor{
|
||||
NamespacePrefix: "Telegraf/",
|
||||
TimestampLimitPast: config.Duration(20 * time.Minute),
|
||||
TimestampLimitFuture: config.Duration(-1 * time.Minute),
|
||||
Timeout: config.Duration(5 * time.Second),
|
||||
timeFunc: time.Now,
|
||||
}
|
||||
})
|
||||
}
|
619
plugins/outputs/azure_monitor/azure_monitor_test.go
Normal file
|
@ -0,0 +1,619 @@
|
|||
package azure_monitor
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"compress/gzip"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/adal"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/config"
|
||||
"github.com/influxdata/telegraf/metric"
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
)
|
||||
|
||||
func TestAggregate(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
stringdim bool
|
||||
metrics []telegraf.Metric
|
||||
addTime time.Time
|
||||
pushTime time.Time
|
||||
expected []telegraf.Metric
|
||||
expectedOutsideWindow int64
|
||||
}{
|
||||
{
|
||||
name: "add metric outside window is dropped",
|
||||
metrics: []telegraf.Metric{
|
||||
testutil.MustMetric(
|
||||
"cpu",
|
||||
map[string]string{},
|
||||
map[string]interface{}{
|
||||
"value": 42,
|
||||
},
|
||||
time.Unix(0, 0),
|
||||
),
|
||||
},
|
||||
addTime: time.Unix(3600, 0),
|
||||
pushTime: time.Unix(3600, 0),
|
||||
expectedOutsideWindow: 1,
|
||||
},
|
||||
{
|
||||
name: "metric not sent until period expires",
|
||||
metrics: []telegraf.Metric{
|
||||
testutil.MustMetric(
|
||||
"cpu",
|
||||
map[string]string{},
|
||||
map[string]interface{}{
|
||||
"value": 42,
|
||||
},
|
||||
time.Unix(0, 0),
|
||||
),
|
||||
},
|
||||
addTime: time.Unix(0, 0),
|
||||
pushTime: time.Unix(0, 0),
|
||||
},
|
||||
{
|
||||
name: "add strings as dimensions",
|
||||
stringdim: true,
|
||||
metrics: []telegraf.Metric{
|
||||
testutil.MustMetric(
|
||||
"cpu",
|
||||
map[string]string{
|
||||
"host": "localhost",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"value": 42,
|
||||
"message": "howdy",
|
||||
},
|
||||
time.Unix(0, 0),
|
||||
),
|
||||
},
|
||||
addTime: time.Unix(0, 0),
|
||||
pushTime: time.Unix(3600, 0),
|
||||
expected: []telegraf.Metric{
|
||||
testutil.MustMetric(
|
||||
"cpu-value",
|
||||
map[string]string{
|
||||
"host": "localhost",
|
||||
"message": "howdy",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"min": 42.0,
|
||||
"max": 42.0,
|
||||
"sum": 42.0,
|
||||
"count": 1,
|
||||
},
|
||||
time.Unix(0, 0),
|
||||
),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "add metric to cache and push",
|
||||
metrics: []telegraf.Metric{
|
||||
testutil.MustMetric(
|
||||
"cpu",
|
||||
map[string]string{},
|
||||
map[string]interface{}{
|
||||
"value": 42,
|
||||
},
|
||||
time.Unix(0, 0),
|
||||
),
|
||||
},
|
||||
addTime: time.Unix(0, 0),
|
||||
pushTime: time.Unix(3600, 0),
|
||||
expected: []telegraf.Metric{
|
||||
testutil.MustMetric(
|
||||
"cpu-value",
|
||||
map[string]string{},
|
||||
map[string]interface{}{
|
||||
"min": 42.0,
|
||||
"max": 42.0,
|
||||
"sum": 42.0,
|
||||
"count": 1,
|
||||
},
|
||||
time.Unix(0, 0),
|
||||
),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "added metric are aggregated",
|
||||
metrics: []telegraf.Metric{
|
||||
testutil.MustMetric(
|
||||
"cpu",
|
||||
map[string]string{},
|
||||
map[string]interface{}{
|
||||
"value": 42,
|
||||
},
|
||||
time.Unix(0, 0),
|
||||
),
|
||||
testutil.MustMetric(
|
||||
"cpu",
|
||||
map[string]string{},
|
||||
map[string]interface{}{
|
||||
"value": 84,
|
||||
},
|
||||
time.Unix(0, 0),
|
||||
),
|
||||
testutil.MustMetric(
|
||||
"cpu",
|
||||
map[string]string{},
|
||||
map[string]interface{}{
|
||||
"value": 2,
|
||||
},
|
||||
time.Unix(0, 0),
|
||||
),
|
||||
},
|
||||
addTime: time.Unix(0, 0),
|
||||
pushTime: time.Unix(3600, 0),
|
||||
expected: []telegraf.Metric{
|
||||
testutil.MustMetric(
|
||||
"cpu-value",
|
||||
map[string]string{},
|
||||
map[string]interface{}{
|
||||
"min": 2.0,
|
||||
"max": 84.0,
|
||||
"sum": 128.0,
|
||||
"count": 3,
|
||||
},
|
||||
time.Unix(0, 0),
|
||||
),
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
msiEndpoint, err := adal.GetMSIVMEndpoint()
|
||||
require.NoError(t, err)
|
||||
t.Setenv("MSI_ENDPOINT", msiEndpoint)
|
||||
|
||||
// Setup plugin
|
||||
plugin := &AzureMonitor{
|
||||
Region: "test",
|
||||
ResourceID: "/test",
|
||||
StringsAsDimensions: tt.stringdim,
|
||||
TimestampLimitPast: config.Duration(30 * time.Minute),
|
||||
TimestampLimitFuture: config.Duration(-1 * time.Minute),
|
||||
Log: testutil.Logger{},
|
||||
timeFunc: func() time.Time { return tt.addTime },
|
||||
}
|
||||
require.NoError(t, plugin.Init())
|
||||
require.NoError(t, plugin.Connect())
|
||||
defer plugin.Close()
|
||||
|
||||
// Reset statistics
|
||||
plugin.MetricOutsideWindow.Set(0)
|
||||
|
||||
// Add the data
|
||||
for _, m := range tt.metrics {
|
||||
plugin.Add(m)
|
||||
}
|
||||
|
||||
// Push out the data at a later time
|
||||
plugin.timeFunc = func() time.Time { return tt.pushTime }
|
||||
metrics := plugin.Push()
|
||||
plugin.Reset()
|
||||
|
||||
// Check the results
|
||||
require.Equal(t, tt.expectedOutsideWindow, plugin.MetricOutsideWindow.Get())
|
||||
testutil.RequireMetricsEqual(t, tt.expected, metrics)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestWrite(t *testing.T) {
|
||||
// Set up a fake environment for Authorizer
|
||||
// This used to fake an MSI environment, but since https://github.com/Azure/go-autorest/pull/670/files it's no longer possible,
|
||||
// So we fake a user/password authentication
|
||||
t.Setenv("AZURE_CLIENT_ID", "fake")
|
||||
t.Setenv("AZURE_USERNAME", "fake")
|
||||
t.Setenv("AZURE_PASSWORD", "fake")
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
metrics []telegraf.Metric
|
||||
expectedCalls uint64
|
||||
expectedMetrics uint64
|
||||
errmsg string
|
||||
}{
|
||||
{
|
||||
name: "if not an azure metric nothing is sent",
|
||||
metrics: []telegraf.Metric{
|
||||
testutil.MustMetric(
|
||||
"cpu",
|
||||
map[string]string{},
|
||||
map[string]interface{}{
|
||||
"value": 42,
|
||||
},
|
||||
time.Unix(0, 0),
|
||||
),
|
||||
},
|
||||
errmsg: "translating metric(s) failed",
|
||||
},
|
||||
{
|
||||
name: "single azure metric",
|
||||
metrics: []telegraf.Metric{
|
||||
testutil.MustMetric(
|
||||
"cpu-value",
|
||||
map[string]string{},
|
||||
map[string]interface{}{
|
||||
"min": float64(42),
|
||||
"max": float64(42),
|
||||
"sum": float64(42),
|
||||
"count": int64(1),
|
||||
},
|
||||
time.Unix(0, 0),
|
||||
),
|
||||
},
|
||||
expectedCalls: 1,
|
||||
expectedMetrics: 1,
|
||||
},
|
||||
{
|
||||
name: "multiple azure metric",
|
||||
metrics: []telegraf.Metric{
|
||||
testutil.MustMetric(
|
||||
"cpu-value",
|
||||
map[string]string{},
|
||||
map[string]interface{}{
|
||||
"min": float64(42),
|
||||
"max": float64(42),
|
||||
"sum": float64(42),
|
||||
"count": int64(1),
|
||||
},
|
||||
time.Unix(0, 0),
|
||||
),
|
||||
testutil.MustMetric(
|
||||
"cpu-value",
|
||||
map[string]string{},
|
||||
map[string]interface{}{
|
||||
"min": float64(42),
|
||||
"max": float64(42),
|
||||
"sum": float64(42),
|
||||
"count": int64(1),
|
||||
},
|
||||
time.Unix(60, 0),
|
||||
),
|
||||
},
|
||||
expectedCalls: 1,
|
||||
expectedMetrics: 2,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Setup test server to collect the sent metrics
|
||||
var calls atomic.Uint64
|
||||
var metrics atomic.Uint64
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
calls.Add(1)
|
||||
|
||||
gz, err := gzip.NewReader(r.Body)
|
||||
if err != nil {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
t.Logf("cannot create gzip reader: %v", err)
|
||||
t.Fail()
|
||||
return
|
||||
}
|
||||
|
||||
scanner := bufio.NewScanner(gz)
|
||||
for scanner.Scan() {
|
||||
var m azureMonitorMetric
|
||||
if err := json.Unmarshal(scanner.Bytes(), &m); err != nil {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
t.Logf("cannot unmarshal JSON: %v", err)
|
||||
t.Fail()
|
||||
return
|
||||
}
|
||||
metrics.Add(1)
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}))
|
||||
defer ts.Close()
|
||||
|
||||
// Setup the plugin
|
||||
plugin := AzureMonitor{
|
||||
EndpointURL: "http://" + ts.Listener.Addr().String(),
|
||||
Region: "test",
|
||||
ResourceID: "/test",
|
||||
TimestampLimitPast: config.Duration(30 * time.Minute),
|
||||
TimestampLimitFuture: config.Duration(-1 * time.Minute),
|
||||
Log: testutil.Logger{},
|
||||
timeFunc: func() time.Time { return time.Unix(120, 0) },
|
||||
}
|
||||
require.NoError(t, plugin.Init())
|
||||
|
||||
// Override with testing setup
|
||||
plugin.preparer = autorest.CreatePreparer(autorest.NullAuthorizer{}.WithAuthorization())
|
||||
require.NoError(t, plugin.Connect())
|
||||
defer plugin.Close()
|
||||
|
||||
err := plugin.Write(tt.metrics)
|
||||
if tt.errmsg != "" {
|
||||
require.ErrorContains(t, err, tt.errmsg)
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tt.expectedCalls, calls.Load())
|
||||
require.Equal(t, tt.expectedMetrics, metrics.Load())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteTimelimits(t *testing.T) {
|
||||
// Set up a fake environment for Authorizer
|
||||
// This used to fake an MSI environment, but since https://github.com/Azure/go-autorest/pull/670/files it's no longer possible,
|
||||
// So we fake a user/password authentication
|
||||
t.Setenv("AZURE_CLIENT_ID", "fake")
|
||||
t.Setenv("AZURE_USERNAME", "fake")
|
||||
t.Setenv("AZURE_PASSWORD", "fake")
|
||||
|
||||
// Setup input metrics
|
||||
tref := time.Now().Truncate(time.Minute)
|
||||
inputs := []telegraf.Metric{
|
||||
metric.New(
|
||||
"cpu-value",
|
||||
map[string]string{
|
||||
"status": "too old",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"min": float64(42),
|
||||
"max": float64(42),
|
||||
"sum": float64(42),
|
||||
"count": int64(1),
|
||||
},
|
||||
tref.Add(-time.Hour),
|
||||
),
|
||||
metric.New(
|
||||
"cpu-value",
|
||||
map[string]string{
|
||||
"status": "30 min in the past",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"min": float64(42),
|
||||
"max": float64(42),
|
||||
"sum": float64(42),
|
||||
"count": int64(1),
|
||||
},
|
||||
tref.Add(-30*time.Minute),
|
||||
),
|
||||
metric.New(
|
||||
"cpu-value",
|
||||
map[string]string{
|
||||
"status": "20 min in the past",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"min": float64(42),
|
||||
"max": float64(42),
|
||||
"sum": float64(42),
|
||||
"count": int64(1),
|
||||
},
|
||||
tref.Add(-20*time.Minute),
|
||||
),
|
||||
metric.New(
|
||||
"cpu-value",
|
||||
map[string]string{
|
||||
"status": "10 min in the past",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"min": float64(42),
|
||||
"max": float64(42),
|
||||
"sum": float64(42),
|
||||
"count": int64(1),
|
||||
},
|
||||
tref.Add(-10*time.Minute),
|
||||
),
|
||||
metric.New(
|
||||
"cpu-value",
|
||||
map[string]string{
|
||||
"status": "now",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"min": float64(42),
|
||||
"max": float64(42),
|
||||
"sum": float64(42),
|
||||
"count": int64(1),
|
||||
},
|
||||
tref,
|
||||
),
|
||||
metric.New(
|
||||
"cpu-value",
|
||||
map[string]string{
|
||||
"status": "1 min in the future",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"min": float64(42),
|
||||
"max": float64(42),
|
||||
"sum": float64(42),
|
||||
"count": int64(1),
|
||||
},
|
||||
tref.Add(1*time.Minute),
|
||||
),
|
||||
metric.New(
|
||||
"cpu-value",
|
||||
map[string]string{
|
||||
"status": "2 min in the future",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"min": float64(42),
|
||||
"max": float64(42),
|
||||
"sum": float64(42),
|
||||
"count": int64(1),
|
||||
},
|
||||
tref.Add(2*time.Minute),
|
||||
),
|
||||
metric.New(
|
||||
"cpu-value",
|
||||
map[string]string{
|
||||
"status": "4 min in the future",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"min": float64(42),
|
||||
"max": float64(42),
|
||||
"sum": float64(42),
|
||||
"count": int64(1),
|
||||
},
|
||||
tref.Add(4*time.Minute),
|
||||
),
|
||||
metric.New(
|
||||
"cpu-value",
|
||||
map[string]string{
|
||||
"status": "5 min in the future",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"min": float64(42),
|
||||
"max": float64(42),
|
||||
"sum": float64(42),
|
||||
"count": int64(1),
|
||||
},
|
||||
tref.Add(5*time.Minute),
|
||||
),
|
||||
metric.New(
|
||||
"cpu-value",
|
||||
map[string]string{
|
||||
"status": "too far in the future",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"min": float64(42),
|
||||
"max": float64(42),
|
||||
"sum": float64(42),
|
||||
"count": int64(1),
|
||||
},
|
||||
tref.Add(time.Hour),
|
||||
),
|
||||
}
|
||||
|
||||
// Error message for status 400
|
||||
msg := `{"error":{"code":"BadRequest","message":"'time' should not be older than 30 minutes and not more than 4 minutes in the future\r\n"}}`
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
input []telegraf.Metric
|
||||
limitPast time.Duration
|
||||
limitFuture time.Duration
|
||||
expectedCount int
|
||||
expectedError string
|
||||
}{
|
||||
{
|
||||
name: "only good metrics",
|
||||
input: inputs[1 : len(inputs)-2],
|
||||
limitPast: 48 * time.Hour,
|
||||
limitFuture: 48 * time.Hour,
|
||||
expectedCount: len(inputs) - 3,
|
||||
},
|
||||
{
|
||||
name: "metrics out of bounds",
|
||||
input: inputs,
|
||||
limitPast: 48 * time.Hour,
|
||||
limitFuture: 48 * time.Hour,
|
||||
expectedCount: len(inputs),
|
||||
expectedError: "400 Bad Request: " + msg,
|
||||
},
|
||||
{
|
||||
name: "default limit",
|
||||
input: inputs,
|
||||
limitPast: 20 * time.Minute,
|
||||
limitFuture: -1 * time.Minute,
|
||||
expectedCount: 2,
|
||||
expectedError: "metric(s) outside of acceptable time window",
|
||||
},
|
||||
{
|
||||
name: "permissive limit",
|
||||
input: inputs,
|
||||
limitPast: 30 * time.Minute,
|
||||
limitFuture: 5 * time.Minute,
|
||||
expectedCount: len(inputs) - 2,
|
||||
expectedError: "metric(s) outside of acceptable time window",
|
||||
},
|
||||
{
|
||||
name: "very strict",
|
||||
input: inputs,
|
||||
limitPast: 19*time.Minute + 59*time.Second,
|
||||
limitFuture: 3*time.Minute + 59*time.Second,
|
||||
expectedCount: len(inputs) - 6,
|
||||
expectedError: "metric(s) outside of acceptable time window",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Counter for the number of received metrics
|
||||
var count atomic.Int32
|
||||
|
||||
// Setup test server
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
defer r.Body.Close()
|
||||
|
||||
reader, err := gzip.NewReader(r.Body)
|
||||
if err != nil {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
t.Logf("unzipping content failed: %v", err)
|
||||
t.Fail()
|
||||
return
|
||||
}
|
||||
defer reader.Close()
|
||||
|
||||
status := http.StatusOK
|
||||
scanner := bufio.NewScanner(reader)
|
||||
for scanner.Scan() {
|
||||
var data map[string]interface{}
|
||||
if err := json.Unmarshal(scanner.Bytes(), &data); err != nil {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
t.Logf("decoding JSON failed: %v", err)
|
||||
t.Fail()
|
||||
return
|
||||
}
|
||||
|
||||
timestamp, err := time.Parse(time.RFC3339, data["time"].(string))
|
||||
if err != nil {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
t.Logf("decoding time failed: %v", err)
|
||||
t.Fail()
|
||||
return
|
||||
}
|
||||
if timestamp.Before(tref.Add(-30*time.Minute)) || timestamp.After(tref.Add(5*time.Minute)) {
|
||||
status = http.StatusBadRequest
|
||||
}
|
||||
count.Add(1)
|
||||
}
|
||||
w.WriteHeader(status)
|
||||
if status == 400 {
|
||||
//nolint:errcheck // Ignoring returned error as it is not relevant for the test
|
||||
w.Write([]byte(msg))
|
||||
}
|
||||
}))
|
||||
defer ts.Close()
|
||||
|
||||
// Setup plugin
|
||||
plugin := AzureMonitor{
|
||||
EndpointURL: "http://" + ts.Listener.Addr().String(),
|
||||
Region: "test",
|
||||
ResourceID: "/test",
|
||||
TimestampLimitPast: config.Duration(tt.limitPast),
|
||||
TimestampLimitFuture: config.Duration(tt.limitFuture),
|
||||
Log: testutil.Logger{},
|
||||
timeFunc: func() time.Time { return tref },
|
||||
}
|
||||
require.NoError(t, plugin.Init())
|
||||
|
||||
// Override with testing setup
|
||||
plugin.preparer = autorest.CreatePreparer(autorest.NullAuthorizer{}.WithAuthorization())
|
||||
require.NoError(t, plugin.Connect())
|
||||
defer plugin.Close()
|
||||
|
||||
// Test writing
|
||||
err := plugin.Write(tt.input)
|
||||
if tt.expectedError == "" {
|
||||
require.NoError(t, err)
|
||||
} else {
|
||||
require.ErrorContains(t, err, tt.expectedError)
|
||||
}
|
||||
require.Equal(t, tt.expectedCount, int(count.Load()))
|
||||
})
|
||||
}
|
||||
}
|
37
plugins/outputs/azure_monitor/sample.conf
Normal file
|
@ -0,0 +1,37 @@
|
|||
# Send aggregate metrics to Azure Monitor
|
||||
[[outputs.azure_monitor]]
|
||||
## Timeout for HTTP writes.
|
||||
# timeout = "20s"
|
||||
|
||||
## Set the namespace prefix, defaults to "Telegraf/<input-name>".
|
||||
# namespace_prefix = "Telegraf/"
|
||||
|
||||
## Azure Monitor doesn't have a string value type, so convert string
|
||||
## fields to dimensions (a.k.a. tags) if enabled. Azure Monitor allows
|
||||
## a maximum of 10 dimensions so Telegraf will only send the first 10
|
||||
## alphanumeric dimensions.
|
||||
# strings_as_dimensions = false
|
||||
|
||||
## Both region and resource_id must be set or be available via the
|
||||
## Instance Metadata service on Azure Virtual Machines.
|
||||
#
|
||||
## Azure Region to publish metrics against.
|
||||
## ex: region = "southcentralus"
|
||||
# region = ""
|
||||
#
|
||||
## The Azure Resource ID against which metric will be logged, e.g.
|
||||
## ex: resource_id = "/subscriptions/<subscription_id>/resourceGroups/<resource_group>/providers/Microsoft.Compute/virtualMachines/<vm_name>"
|
||||
# resource_id = ""
|
||||
|
||||
## Optionally, if in Azure US Government, China, or other sovereign
|
||||
## cloud environment, set the appropriate REST endpoint for receiving
|
||||
## metrics. (Note: region may be unused in this context)
|
||||
# endpoint_url = "https://monitoring.core.usgovcloudapi.net"
|
||||
|
||||
## Time limitations of metric to send
|
||||
## Documentation can be found here:
|
||||
## https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/metrics-store-custom-rest-api?tabs=rest#timestamp
|
||||
## However, the returned (400) error message might document stricter or more
|
||||
## relaxed settings. By default, only past metrics within the limit are sent.
|
||||
# timestamp_limit_past = "30m"
|
||||
# timestamp_limit_future = "-1m"
|
60
plugins/outputs/azure_monitor/types.go
Normal file
|
@ -0,0 +1,60 @@
|
|||
package azure_monitor
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
type azureMonitorMetric struct {
|
||||
Time time.Time `json:"time"`
|
||||
Data *azureMonitorData `json:"data"`
|
||||
index int
|
||||
}
|
||||
|
||||
type azureMonitorData struct {
|
||||
BaseData *azureMonitorBaseData `json:"baseData"`
|
||||
}
|
||||
|
||||
type azureMonitorBaseData struct {
|
||||
Metric string `json:"metric"`
|
||||
Namespace string `json:"namespace"`
|
||||
DimensionNames []string `json:"dimNames"`
|
||||
Series []*azureMonitorSeries `json:"series"`
|
||||
}
|
||||
|
||||
type azureMonitorSeries struct {
|
||||
DimensionValues []string `json:"dimValues"`
|
||||
Min float64 `json:"min"`
|
||||
Max float64 `json:"max"`
|
||||
Sum float64 `json:"sum"`
|
||||
Count int64 `json:"count"`
|
||||
}
|
||||
|
||||
// VirtualMachineMetadata contains information about a VM from the metadata service
|
||||
type virtualMachineMetadata struct {
|
||||
Compute struct {
|
||||
Location string `json:"location"`
|
||||
Name string `json:"name"`
|
||||
ResourceGroupName string `json:"resourceGroupName"`
|
||||
SubscriptionID string `json:"subscriptionId"`
|
||||
VMScaleSetName string `json:"vmScaleSetName"`
|
||||
} `json:"compute"`
|
||||
}
|
||||
|
||||
func (m *virtualMachineMetadata) ResourceID() string {
|
||||
if m.Compute.VMScaleSetName != "" {
|
||||
return fmt.Sprintf(
|
||||
resourceIDScaleSetTemplate,
|
||||
m.Compute.SubscriptionID,
|
||||
m.Compute.ResourceGroupName,
|
||||
m.Compute.VMScaleSetName,
|
||||
)
|
||||
}
|
||||
|
||||
return fmt.Sprintf(
|
||||
resourceIDTemplate,
|
||||
m.Compute.SubscriptionID,
|
||||
m.Compute.ResourceGroupName,
|
||||
m.Compute.Name,
|
||||
)
|
||||
}
|
126
plugins/outputs/bigquery/README.md
Normal file
|
@ -0,0 +1,126 @@
|
|||
# Google BigQuery Output Plugin
|
||||
|
||||
This plugin writes metrics to the [Google Cloud BigQuery][big_query] service
|
||||
and requires [authentication][authentication] with Google Cloud using either a
|
||||
service account or user credentials.
|
||||
|
||||
> [!IMPORTANT]
|
||||
> Be aware that this plugin accesses APIs that are [chargeable][pricing] and
|
||||
> might incur costs.
|
||||
|
||||
[authentication]: https://cloud.google.com/bigquery/docs/authentication
|
||||
[big_query]: https://cloud.google.com/bigquery
|
||||
[pricing]: https://cloud.google.com/bigquery/pricing
|
||||
|
||||
⭐ Telegraf v1.18.0
|
||||
🏷️ cloud, datastore
|
||||
💻 all
|
||||
|
||||
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
|
||||
|
||||
In addition to the plugin-specific configuration settings, plugins support
|
||||
additional global and plugin configuration settings. These settings are used to
|
||||
modify metrics, tags, and fields or create aliases and configure ordering, etc.
|
||||
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
|
||||
|
||||
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
|
||||
|
||||
## Configuration
|
||||
|
||||
```toml @sample.conf
|
||||
# Configuration for Google Cloud BigQuery to send entries
|
||||
[[outputs.bigquery]]
|
||||
## Credentials File
|
||||
credentials_file = "/path/to/service/account/key.json"
|
||||
|
||||
## Google Cloud Platform Project
|
||||
# project = ""
|
||||
|
||||
## The namespace for the metric descriptor
|
||||
dataset = "telegraf"
|
||||
|
||||
## Timeout for BigQuery operations.
|
||||
# timeout = "5s"
|
||||
|
||||
## Character to replace hyphens on Metric name
|
||||
# replace_hyphen_to = "_"
|
||||
|
||||
## Write all metrics in a single compact table
|
||||
# compact_table = ""
|
||||
```
|
||||
|
||||
Leaving `project` empty indicates the plugin will try to retrieve the project
|
||||
from the credentials file.
|
||||
|
||||
Requires `dataset` to specify under which BigQuery dataset the corresponding
|
||||
metrics tables reside.
|
||||
|
||||
Each metric should have a corresponding table in BigQuery (a sketch for creating
|
||||
such a table follows the list below). The schema of the table on BigQuery:
|
||||
|
||||
* Should contain the field `timestamp` which is the timestamp of the Telegraf
|
||||
  metric
|
||||
* Should contain the metric's tags with the same name and the column type should
|
||||
be set to string.
|
||||
* Should contain the metric's fields with the same name and the column type
|
||||
should match the field type.
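
The sketch below creates such a table with the Go BigQuery client for a
hypothetical `cpu` metric with a `host` tag and a float `usage_idle` field; the
project and dataset names are placeholders, not values required by the plugin:

```go
package main

import (
	"context"
	"log"

	"cloud.google.com/go/bigquery"
)

func main() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "my-project") // placeholder project
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// One column for the timestamp, one per tag (STRING) and one per field.
	schema := bigquery.Schema{
		{Name: "timestamp", Type: bigquery.TimestampFieldType, Required: true},
		{Name: "host", Type: bigquery.StringFieldType},      // tag column
		{Name: "usage_idle", Type: bigquery.FloatFieldType}, // field column
	}

	table := client.Dataset("telegraf").Table("cpu")
	if err := table.Create(ctx, &bigquery.TableMetadata{Schema: schema}); err != nil {
		log.Fatal(err)
	}
}
```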
|
||||
|
||||
## Compact table
|
||||
|
||||
When enabling the compact table, all metrics are inserted to the given table
|
||||
with the following schema:
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"mode": "REQUIRED",
|
||||
"name": "timestamp",
|
||||
"type": "TIMESTAMP"
|
||||
},
|
||||
{
|
||||
"mode": "REQUIRED",
|
||||
"name": "name",
|
||||
"type": "STRING"
|
||||
},
|
||||
{
|
||||
"mode": "REQUIRED",
|
||||
"name": "tags",
|
||||
"type": "JSON"
|
||||
},
|
||||
{
|
||||
"mode": "REQUIRED",
|
||||
"name": "fields",
|
||||
"type": "JSON"
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
## Restrictions
|
||||
|
||||
Avoid hyphens in BigQuery table names; the underlying SDK cannot handle streaming
|
||||
inserts to tables with hyphens.
|
||||
|
||||
In cases of metrics with hyphens please use the [Rename Processor
|
||||
Plugin][rename].
|
||||
|
||||
For metrics with hyphens in their name, the hyphens are by default replaced with
|
||||
underscores (`_`). This can be altered using the `replace_hyphen_to`
|
||||
configuration property.
|
||||
|
||||
Available data type options are:
|
||||
|
||||
* integer
|
||||
* float or long
|
||||
* string
|
||||
* boolean
|
||||
|
||||
All field naming restrictions that apply to BigQuery should apply to the
|
||||
measurements to be imported.
|
||||
|
||||
Tables on BigQuery should be created beforehand and they are not created during
|
||||
persistence.
|
||||
|
||||
Pay attention to the column `timestamp` since it is reserved upfront and cannot
|
||||
change. If partitioning is required, make sure it is applied beforehand.
|
||||
|
||||
[rename]: ../../processors/rename/README.md
|
323
plugins/outputs/bigquery/bigquery.go
Normal file
|
@ -0,0 +1,323 @@
|
|||
//go:generate ../../../tools/readme_config_includer/generator
package bigquery

import (
    "context"
    _ "embed"
    "encoding/json"
    "errors"
    "fmt"
    "math"
    "reflect"
    "strings"
    "sync"
    "time"

    "cloud.google.com/go/bigquery"
    "golang.org/x/oauth2/google"
    "google.golang.org/api/option"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/config"
    "github.com/influxdata/telegraf/internal"
    "github.com/influxdata/telegraf/plugins/outputs"
)

//go:embed sample.conf
var sampleConfig string

const timeStampFieldName = "timestamp"

var defaultTimeout = config.Duration(5 * time.Second)

type BigQuery struct {
    CredentialsFile string `toml:"credentials_file"`
    Project         string `toml:"project"`
    Dataset         string `toml:"dataset"`

    Timeout         config.Duration `toml:"timeout"`
    ReplaceHyphenTo string          `toml:"replace_hyphen_to"`
    CompactTable    string          `toml:"compact_table"`

    Log telegraf.Logger `toml:"-"`

    client *bigquery.Client

    warnedOnHyphens map[string]bool
}

func (*BigQuery) SampleConfig() string {
    return sampleConfig
}

func (b *BigQuery) Init() error {
    if b.Project == "" {
        b.Project = bigquery.DetectProjectID
    }

    if b.Dataset == "" {
        return errors.New(`"dataset" is required`)
    }

    b.warnedOnHyphens = make(map[string]bool)

    return nil
}

func (b *BigQuery) Connect() error {
    if b.client == nil {
        if err := b.setUpDefaultClient(); err != nil {
            return err
        }
    }

    if b.CompactTable != "" {
        ctx := context.Background()
        ctx, cancel := context.WithTimeout(ctx, time.Duration(b.Timeout))
        defer cancel()

        // Check if the compact table exists
        _, err := b.client.Dataset(b.Dataset).Table(b.CompactTable).Metadata(ctx)
        if err != nil {
            return fmt.Errorf("compact table: %w", err)
        }
    }
    return nil
}

func (b *BigQuery) setUpDefaultClient() error {
    var credentialsOption option.ClientOption

    // https://cloud.google.com/go/docs/reference/cloud.google.com/go/0.94.1#hdr-Timeouts_and_Cancellation
    // Do not attempt to add timeout to this context for the bigquery client.
    ctx := context.Background()

    if b.CredentialsFile != "" {
        credentialsOption = option.WithCredentialsFile(b.CredentialsFile)
    } else {
        creds, err := google.FindDefaultCredentials(ctx, bigquery.Scope)
        if err != nil {
            return fmt.Errorf(
                "unable to find Google Cloud Platform Application Default Credentials: %w. "+
                    "Either set ADC or provide CredentialsFile config", err)
        }
        credentialsOption = option.WithCredentials(creds)
    }

    client, err := bigquery.NewClient(ctx, b.Project,
        credentialsOption,
        option.WithUserAgent(internal.ProductToken()),
    )
    b.client = client
    return err
}

// Write the metrics to Google Cloud BigQuery.
func (b *BigQuery) Write(metrics []telegraf.Metric) error {
    if b.CompactTable != "" {
        return b.writeCompact(metrics)
    }

    groupedMetrics := groupByMetricName(metrics)

    var wg sync.WaitGroup

    for k, v := range groupedMetrics {
        wg.Add(1)
        go func(k string, v []bigquery.ValueSaver) {
            defer wg.Done()
            b.insertToTable(k, v)
        }(k, v)
    }

    wg.Wait()

    return nil
}

func (b *BigQuery) writeCompact(metrics []telegraf.Metric) error {
    ctx := context.Background()
    ctx, cancel := context.WithTimeout(ctx, time.Duration(b.Timeout))
    defer cancel()

    // Always returns an instance, even if table doesn't exist (anymore).
    inserter := b.client.Dataset(b.Dataset).Table(b.CompactTable).Inserter()

    var compactValues []*bigquery.ValuesSaver
    for _, m := range metrics {
        valueSaver, err := b.newCompactValuesSaver(m)
        if err != nil {
            b.Log.Warnf("could not prepare metric as compact value: %v", err)
        } else {
            compactValues = append(compactValues, valueSaver)
        }
    }
    return inserter.Put(ctx, compactValues)
}

func groupByMetricName(metrics []telegraf.Metric) map[string][]bigquery.ValueSaver {
    groupedMetrics := make(map[string][]bigquery.ValueSaver)

    for _, m := range metrics {
        bqm := newValuesSaver(m)
        groupedMetrics[m.Name()] = append(groupedMetrics[m.Name()], bqm)
    }

    return groupedMetrics
}

func newValuesSaver(m telegraf.Metric) *bigquery.ValuesSaver {
    s := make(bigquery.Schema, 0)
    r := make([]bigquery.Value, 0)
    timeSchema := timeStampFieldSchema()
    s = append(s, timeSchema)
    r = append(r, m.Time())

    s, r = tagsSchemaAndValues(m, s, r)
    s, r = valuesSchemaAndValues(m, s, r)

    return &bigquery.ValuesSaver{
        Schema: s.Relax(),
        Row:    r,
    }
}

func (b *BigQuery) newCompactValuesSaver(m telegraf.Metric) (*bigquery.ValuesSaver, error) {
    tags, err := json.Marshal(m.Tags())
    if err != nil {
        return nil, fmt.Errorf("serializing tags: %w", err)
    }

    rawFields := make(map[string]interface{}, len(m.FieldList()))
    for _, field := range m.FieldList() {
        if fv, ok := field.Value.(float64); ok {
            // JSON does not support these special values
            if math.IsNaN(fv) || math.IsInf(fv, 0) {
                b.Log.Debugf("Ignoring unsupported field %s with value %q for metric %s", field.Key, field.Value, m.Name())
                continue
            }
        }
        rawFields[field.Key] = field.Value
    }
    fields, err := json.Marshal(rawFields)
    if err != nil {
        return nil, fmt.Errorf("serializing fields: %w", err)
    }

    return &bigquery.ValuesSaver{
        Schema: bigquery.Schema{
            timeStampFieldSchema(),
            newStringFieldSchema("name"),
            newJSONFieldSchema("tags"),
            newJSONFieldSchema("fields"),
        },
        Row: []bigquery.Value{
            m.Time(),
            m.Name(),
            string(tags),
            string(fields),
        },
    }, nil
}

func timeStampFieldSchema() *bigquery.FieldSchema {
    return &bigquery.FieldSchema{
        Name: timeStampFieldName,
        Type: bigquery.TimestampFieldType,
    }
}

func newStringFieldSchema(name string) *bigquery.FieldSchema {
    return &bigquery.FieldSchema{
        Name: name,
        Type: bigquery.StringFieldType,
    }
}

func newJSONFieldSchema(name string) *bigquery.FieldSchema {
    return &bigquery.FieldSchema{
        Name: name,
        Type: bigquery.JSONFieldType,
    }
}

func tagsSchemaAndValues(m telegraf.Metric, s bigquery.Schema, r []bigquery.Value) ([]*bigquery.FieldSchema, []bigquery.Value) {
    for _, t := range m.TagList() {
        s = append(s, newStringFieldSchema(t.Key))
        r = append(r, t.Value)
    }

    return s, r
}

func valuesSchemaAndValues(m telegraf.Metric, s bigquery.Schema, r []bigquery.Value) ([]*bigquery.FieldSchema, []bigquery.Value) {
    for _, f := range m.FieldList() {
        s = append(s, valuesSchema(f))
        r = append(r, f.Value)
    }

    return s, r
}

func valuesSchema(f *telegraf.Field) *bigquery.FieldSchema {
    return &bigquery.FieldSchema{
        Name: f.Key,
        Type: valueToBqType(f.Value),
    }
}

func valueToBqType(v interface{}) bigquery.FieldType {
    switch reflect.ValueOf(v).Kind() {
    case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64:
        return bigquery.IntegerFieldType
    case reflect.Float32, reflect.Float64:
        return bigquery.FloatFieldType
    case reflect.Bool:
        return bigquery.BooleanFieldType
    default:
        return bigquery.StringFieldType
    }
}

func (b *BigQuery) insertToTable(metricName string, metrics []bigquery.ValueSaver) {
    ctx := context.Background()
    ctx, cancel := context.WithTimeout(ctx, time.Duration(b.Timeout))
    defer cancel()

    tableName := b.metricToTable(metricName)
    table := b.client.Dataset(b.Dataset).Table(tableName)
    inserter := table.Inserter()

    if err := inserter.Put(ctx, metrics); err != nil {
        b.Log.Errorf("inserting metric %q failed: %v", metricName, err)
    }
}

func (b *BigQuery) metricToTable(metricName string) string {
    if !strings.Contains(metricName, "-") {
        return metricName
    }

    dhm := strings.ReplaceAll(metricName, "-", b.ReplaceHyphenTo)

    if warned := b.warnedOnHyphens[metricName]; !warned {
        b.Log.Warnf("Metric %q contains hyphens please consider using the rename processor plugin, falling back to %q", metricName, dhm)
        b.warnedOnHyphens[metricName] = true
    }

    return dhm
}

// Close will terminate the session to the backend, returning error if an issue arises.
func (b *BigQuery) Close() error {
    return b.client.Close()
}

func init() {
    outputs.Add("bigquery", func() telegraf.Output {
        return &BigQuery{
            Timeout:         defaultTimeout,
            ReplaceHyphenTo: "_",
        }
    })
}
304
plugins/outputs/bigquery/bigquery_test.go
Normal file
@@ -0,0 +1,304 @@
package bigquery

import (
    "context"
    "encoding/json"
    "net/http"
    "net/http/httptest"
    "testing"
    "time"

    "cloud.google.com/go/bigquery"
    "github.com/stretchr/testify/require"
    "google.golang.org/api/option"
    "google.golang.org/api/option/internaloption"

    "github.com/influxdata/telegraf/testutil"
)

const (
    successfulResponse = `{"kind": "bigquery#tableDataInsertAllResponse"}`
)

var receivedBody map[string]json.RawMessage

type Row struct {
    Tag1      string  `json:"tag1"`
    Timestamp string  `json:"timestamp"`
    Value     float64 `json:"value"`
}

func TestInit(t *testing.T) {
    tests := []struct {
        name        string
        errorString string
        plugin      *BigQuery
    }{
        {
            name:        "dataset is not set",
            errorString: `"dataset" is required`,
            plugin:      &BigQuery{},
        },
        {
            name: "valid config",
            plugin: &BigQuery{
                Dataset: "test-dataset",
            },
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            if tt.errorString != "" {
                require.EqualError(t, tt.plugin.Init(), tt.errorString)
            } else {
                require.NoError(t, tt.plugin.Init())
            }
        })
    }
}

func TestMetricToTable(t *testing.T) {
    tests := []struct {
        name            string
        replaceHyphenTo string
        metricName      string
        expectedTable   string
    }{
        {
            name:            "no rename",
            replaceHyphenTo: "_",
            metricName:      "test",
            expectedTable:   "test",
        },
        {
            name:            "default config",
            replaceHyphenTo: "_",
            metricName:      "table-with-hyphens",
            expectedTable:   "table_with_hyphens",
        },
        {
            name:            "custom hyphens",
            replaceHyphenTo: "*",
            metricName:      "table-with-hyphens",
            expectedTable:   "table*with*hyphens",
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            b := &BigQuery{
                Dataset:         "test-dataset",
                ReplaceHyphenTo: tt.replaceHyphenTo,
                Log:             testutil.Logger{},
            }
            require.NoError(t, b.Init())

            require.Equal(t, tt.expectedTable, b.metricToTable(tt.metricName))
            if tt.metricName != tt.expectedTable {
                require.Contains(t, b.warnedOnHyphens, tt.metricName)
                require.True(t, b.warnedOnHyphens[tt.metricName])
            } else {
                require.NotContains(t, b.warnedOnHyphens, tt.metricName)
            }
        })
    }
}

func TestConnect(t *testing.T) {
    srv := localBigQueryServer(t)
    defer srv.Close()

    tests := []struct {
        name         string
        compactTable string
        errorString  string
    }{
        {name: "normal"},
        {
            name:         "compact table existing",
            compactTable: "test-metrics",
        },
        {
            name:         "compact table not existing",
            compactTable: "foobar",
            errorString:  "compact table: googleapi: got HTTP response code 404",
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            b := &BigQuery{
                Project:      "test-project",
                Dataset:      "test-dataset",
                Timeout:      defaultTimeout,
                CompactTable: tt.compactTable,
            }

            require.NoError(t, b.Init())
            require.NoError(t, b.setUpTestClient(srv.URL))

            if tt.errorString != "" {
                require.ErrorContains(t, b.Connect(), tt.errorString)
            } else {
                require.NoError(t, b.Connect())
            }
        })
    }
}

func TestWrite(t *testing.T) {
    srv := localBigQueryServer(t)
    defer srv.Close()

    b := &BigQuery{
        Project: "test-project",
        Dataset: "test-dataset",
        Timeout: defaultTimeout,
    }

    mockMetrics := testutil.MockMetrics()

    require.NoError(t, b.Init())
    require.NoError(t, b.setUpTestClient(srv.URL))
    require.NoError(t, b.Connect())

    require.NoError(t, b.Write(mockMetrics))

    var rows []map[string]json.RawMessage
    require.NoError(t, json.Unmarshal(receivedBody["rows"], &rows))

    var row Row
    require.NoError(t, json.Unmarshal(rows[0]["json"], &row))

    pt, err := time.Parse(time.RFC3339, row.Timestamp)
    require.NoError(t, err)
    require.Equal(t, mockMetrics[0].Tags()["tag1"], row.Tag1)
    require.Equal(t, mockMetrics[0].Time(), pt)
    require.InDelta(t, mockMetrics[0].Fields()["value"], row.Value, testutil.DefaultDelta)
}

func TestWriteCompact(t *testing.T) {
    srv := localBigQueryServer(t)
    defer srv.Close()

    b := &BigQuery{
        Project:      "test-project",
        Dataset:      "test-dataset",
        Timeout:      defaultTimeout,
        CompactTable: "test-metrics",
    }

    mockMetrics := testutil.MockMetrics()

    require.NoError(t, b.Init())
    require.NoError(t, b.setUpTestClient(srv.URL))
    require.NoError(t, b.Connect())

    require.NoError(t, b.Write(mockMetrics))

    var rows []map[string]json.RawMessage
    require.NoError(t, json.Unmarshal(receivedBody["rows"], &rows))
    require.Len(t, rows, 1)
    require.Contains(t, rows[0], "json")

    var row interface{}
    require.NoError(t, json.Unmarshal(rows[0]["json"], &row))
    require.Equal(t, map[string]interface{}{
        "timestamp": "2009-11-10T23:00:00Z",
        "name":      "test1",
        "tags":      `{"tag1":"value1"}`,
        "fields":    `{"value":1}`,
    }, row)

    require.NoError(t, b.Close())
}

func TestAutoDetect(t *testing.T) {
    srv := localBigQueryServer(t)
    defer srv.Close()

    b := &BigQuery{
        Dataset:      "test-dataset",
        Timeout:      defaultTimeout,
        CompactTable: "test-metrics",
    }

    credentialsJSON := []byte(`{"type": "service_account", "project_id": "test-project"}`)

    require.NoError(t, b.Init())
    require.NoError(t, b.setUpTestClientWithJSON(srv.URL, credentialsJSON))
    require.NoError(t, b.Connect())
    require.NoError(t, b.Close())
}

func (b *BigQuery) setUpTestClient(endpointURL string) error {
    noAuth := option.WithoutAuthentication()
    endpoint := option.WithEndpoint(endpointURL)

    ctx := context.Background()

    c, err := bigquery.NewClient(ctx, b.Project, noAuth, endpoint)

    if err != nil {
        return err
    }

    b.client = c

    return nil
}

func (b *BigQuery) setUpTestClientWithJSON(endpointURL string, credentialsJSON []byte) error {
    noAuth := option.WithoutAuthentication()
    endpoint := option.WithEndpoint(endpointURL)
    credentials := option.WithCredentialsJSON(credentialsJSON)
    skipValidate := internaloption.SkipDialSettingsValidation()

    ctx := context.Background()

    c, err := bigquery.NewClient(ctx, b.Project, credentials, noAuth, endpoint, skipValidate)

    b.client = c
    return err
}

func localBigQueryServer(t *testing.T) *httptest.Server {
    srv := httptest.NewServer(http.NotFoundHandler())

    srv.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        switch r.URL.Path {
        case "/projects/test-project/datasets/test-dataset/tables/test1/insertAll",
            "/projects/test-project/datasets/test-dataset/tables/test-metrics/insertAll":
            decoder := json.NewDecoder(r.Body)
            if err := decoder.Decode(&receivedBody); err != nil {
                w.WriteHeader(http.StatusInternalServerError)
                t.Error(err)
                return
            }

            w.WriteHeader(http.StatusOK)
            if _, err := w.Write([]byte(successfulResponse)); err != nil {
                w.WriteHeader(http.StatusInternalServerError)
                t.Error(err)
                return
            }
        case "/projects/test-project/datasets/test-dataset/tables/test-metrics":
            w.WriteHeader(http.StatusOK)
            if _, err := w.Write([]byte("{}")); err != nil {
                w.WriteHeader(http.StatusInternalServerError)
                t.Error(err)
                return
            }
        default:
            w.WriteHeader(http.StatusNotFound)
            if _, err := w.Write([]byte(r.URL.String())); err != nil {
                w.WriteHeader(http.StatusInternalServerError)
                t.Error(err)
                return
            }
        }
    })

    return srv
}
19
plugins/outputs/bigquery/sample.conf
Normal file
@@ -0,0 +1,19 @@
# Configuration for Google Cloud BigQuery to send entries
[[outputs.bigquery]]
  ## Credentials File
  credentials_file = "/path/to/service/account/key.json"

  ## Google Cloud Platform Project
  # project = ""

  ## The namespace for the metric descriptor
  dataset = "telegraf"

  ## Timeout for BigQuery operations.
  # timeout = "5s"

  ## Character to replace hyphens on Metric name
  # replace_hyphen_to = "_"

  ## Write all metrics in a single compact table
  # compact_table = ""
90
plugins/outputs/clarify/README.md
Normal file
@@ -0,0 +1,90 @@
# Clarify Output Plugin

This plugin writes metrics to [Clarify][clarify]. To use this plugin you will
need to obtain a set of [credentials][credentials].

⭐ Telegraf v1.27.0
🏷️ cloud, datastore
💻 all

## Global configuration options <!-- @/docs/includes/plugin_config.md -->

In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used to
modify metrics, tags, and fields or create aliases and configure ordering, etc.
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.

[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins

## Configuration

```toml @sample.conf
## Configuration to publish Telegraf metrics to Clarify
[[outputs.clarify]]
  ## Credentials File (Oauth 2.0 from Clarify integration)
  credentials_file = "/path/to/clarify/credentials.json"

  ## Clarify username password (Basic Auth from Clarify integration)
  username = "i-am-bob"
  password = "secret-password"

  ## Timeout for Clarify operations
  # timeout = "20s"

  ## Optional tags to be included when generating the unique ID for a signal in Clarify
  # id_tags = []
  # clarify_id_tag = 'clarify_input_id'
```

You can use either a credentials file or a username/password pair, but not
both: if both are specified in the configuration, the plugin fails to
initialize with an error.

## How Telegraf Metrics map to Clarify signals

Clarify signal names are formed by joining the Telegraf metric name and the
field key with a `.` character. Telegraf tags are added to signal labels.

If you wish to specify a specific tag to use as the input ID, set the config
option `clarify_id_tag` to the tag containing the ID to be used.
If this tag is present and there is only one field present in the metric,
this tag will be used as the input ID in Clarify. If there are more fields
available in the metric, the tag will be ignored and normal ID generation
will be used.

If information from one or several tags is needed to uniquely identify a metric
field, the `id_tags` array can be added to the config with the needed tag
names, e.g.:

`id_tags = ['sensor']`
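
As a rough sketch of the resulting naming (illustrative only, not the plugin's
exact code): the unique ID is built by joining the metric name, the field key
and any configured `id_tags` values with `.`, and characters Clarify does not
accept are replaced by `_`:

```go
package main

import (
    "fmt"
    "strings"
)

const allowedIDRunes = `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890-_:.#+/`

// normalizeID replaces every rune outside the allowed set with '_'.
func normalizeID(id string) string {
    return strings.Map(func(r rune) rune {
        if strings.ContainsRune(allowedIDRunes, r) {
            return r
        }
        return '_'
    }, id)
}

func main() {
    // Hypothetical metric: name "temperature", field "value", tag sensor=TC0P,
    // with id_tags = ['sensor'] configured.
    parts := []string{"temperature", "value", "TC0P"}
    id := normalizeID(strings.Join(parts, "."))
    fmt.Println(id) // temperature.value.TC0P
}
```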

Clarify only supports values that can be converted to floating point numbers.
Strings and invalid numbers are ignored.

## Example

The following input would be stored in Clarify with the values shown below:

```text
temperature,host=demo.clarifylocal,sensor=TC0P value=49 1682670910000000000
```

```json
"signal" {
  "id": "temperature.value.TC0P"
  "name": "temperature.value"
  "labels": {
    "host": ["demo.clarifylocal"],
    "sensor": ["TC0P"]
  }
}
"values" {
  "times": ["2023-04-28T08:43:16+00:00"],
  "series": {
    "temperature.value.TC0P": [49]
  }
}
```

[clarify]: https://clarify.io
[credentials]: https://docs.clarify.io/users/admin/integrations/credentials
191
plugins/outputs/clarify/clarify.go
Normal file
@@ -0,0 +1,191 @@
//go:generate ../../../tools/readme_config_includer/generator

package clarify

import (
    "context"
    _ "embed"
    "errors"
    "fmt"
    "strings"
    "time"

    "github.com/clarify/clarify-go"
    "github.com/clarify/clarify-go/fields"
    "github.com/clarify/clarify-go/views"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/config"
    "github.com/influxdata/telegraf/internal"
    "github.com/influxdata/telegraf/plugins/outputs"
)

type Clarify struct {
    Username        config.Secret   `toml:"username"`
    Password        config.Secret   `toml:"password"`
    CredentialsFile string          `toml:"credentials_file"`
    Timeout         config.Duration `toml:"timeout"`
    IDTags          []string        `toml:"id_tags"`
    ClarifyIDTag    string          `toml:"clarify_id_tag"`
    Log             telegraf.Logger `toml:"-"`

    client *clarify.Client
}

var errIDTooLong = errors.New("id too long (>128)")
var errCredentials = errors.New("only credentials_file OR username/password can be specified")

const defaultTimeout = config.Duration(20 * time.Second)
const allowedIDRunes = `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890-_:.#+/`

//go:embed sample.conf
var sampleConfig string

func (c *Clarify) Init() error {
    if c.Timeout <= 0 {
        c.Timeout = defaultTimeout
    }
    // Not blocking as it doesn't do any http requests, just sets up the necessary Oauth2 client.
    ctx := context.Background()
    switch {
    case c.CredentialsFile != "":
        if !c.Username.Empty() || !c.Password.Empty() {
            return errCredentials
        }
        creds, err := clarify.CredentialsFromFile(c.CredentialsFile)
        if err != nil {
            return err
        }
        c.client = creds.Client(ctx)
        return nil
    case !c.Username.Empty() && !c.Password.Empty():
        username, err := c.Username.Get()
        if err != nil {
            return fmt.Errorf("getting username failed: %w", err)
        }
        password, err := c.Password.Get()
        if err != nil {
            username.Destroy()
            return fmt.Errorf("getting password failed: %w", err)
        }
        creds := clarify.BasicAuthCredentials(username.String(), password.String())
        username.Destroy()
        password.Destroy()
        c.client = creds.Client(ctx)
        return nil
    }
    return errors.New("no credentials provided")
}

func (*Clarify) Connect() error {
    return nil
}

func (c *Clarify) Write(metrics []telegraf.Metric) error {
    frame, signals := c.processMetrics(metrics)

    ctx := context.Background()
    ctx, cancel := context.WithTimeout(ctx, time.Duration(c.Timeout))
    defer cancel()

    if _, err := c.client.Insert(frame).Do(ctx); err != nil {
        return fmt.Errorf("inserting failed: %w", err)
    }

    if _, err := c.client.SaveSignals(signals).Do(ctx); err != nil {
        return fmt.Errorf("saving signals failed: %w", err)
    }

    return nil
}

func (c *Clarify) processMetrics(metrics []telegraf.Metric) (views.DataFrame, map[string]views.SignalSave) {
    signals := make(map[string]views.SignalSave)
    frame := views.DataFrame{}

    for _, m := range metrics {
        for _, f := range m.FieldList() {
            value, err := internal.ToFloat64(f.Value)
            if err != nil {
                c.Log.Warnf("Skipping field %q of metric %q: %s", f.Key, m.Name(), err.Error())
                continue
            }
            id, err := c.generateID(m, f)
            if err != nil {
                c.Log.Warnf("Skipping field %q of metric %q: %s", f.Key, m.Name(), err.Error())
                continue
            }
            ts := fields.AsTimestamp(m.Time())

            if _, ok := frame[id]; ok {
                frame[id][ts] = value
            } else {
                frame[id] = views.DataSeries{ts: value}
            }

            s := views.SignalSave{}
            s.Name = m.Name() + "." + f.Key

            for _, t := range m.TagList() {
                labelName := strings.ReplaceAll(t.Key, " ", "-")
                labelName = strings.ReplaceAll(labelName, "_", "-")
                labelName = strings.ToLower(labelName)
                s.Labels.Add(labelName, t.Value)
            }

            signals[id] = s
        }
    }
    return frame, signals
}

func normalizeID(id string) string {
    return strings.Map(func(r rune) rune {
        if strings.ContainsRune(allowedIDRunes, r) {
            return r
        }
        return '_'
    }, id)
}

func (c *Clarify) generateID(m telegraf.Metric, f *telegraf.Field) (string, error) {
    var id string
    if c.ClarifyIDTag != "" {
        if cid, exist := m.GetTag(c.ClarifyIDTag); exist && len(m.FieldList()) == 1 {
            id = cid
        }
    }
    if id == "" {
        parts := make([]string, 0, len(c.IDTags)+2)
        parts = append(parts, m.Name(), f.Key)

        for _, idTag := range c.IDTags {
            if k, found := m.GetTag(idTag); found {
                parts = append(parts, k)
            }
        }
        id = strings.Join(parts, ".")
    }
    id = normalizeID(id)
    if len(id) > 128 {
        return id, errIDTooLong
    }
    return id, nil
}

func (*Clarify) SampleConfig() string {
    return sampleConfig
}

func (c *Clarify) Close() error {
    c.client = nil
    return nil
}

func init() {
    outputs.Add("clarify", func() telegraf.Output {
        return &Clarify{
            Timeout: defaultTimeout,
        }
    })
}
318
plugins/outputs/clarify/clarify_test.go
Normal file
@@ -0,0 +1,318 @@
package clarify

import (
    "context"
    "encoding/json"
    "errors"
    "math"
    "slices"
    "testing"
    "time"

    "github.com/clarify/clarify-go"
    "github.com/clarify/clarify-go/jsonrpc"
    "github.com/clarify/clarify-go/views"
    "github.com/stretchr/testify/require"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/config"
    "github.com/influxdata/telegraf/testutil"
)

var errTimeout = errors.New("timeout: operation timed out")

const validResponse = `{
  "signalsByInput" : {
    "test1.value" : {
      "id": "c8bvu9fqfsjctpv7b6fg",
      "created" : true
    }
  }
}`

type MockHandler struct {
    jsonResult string
    sleep      time.Duration
}

func (m *MockHandler) Do(ctx context.Context, _ jsonrpc.Request, result any) error {
    err := json.Unmarshal([]byte(m.jsonResult), result)
    if m.sleep > 0 {
        timer := time.NewTimer(m.sleep)
        select {
        case <-ctx.Done():
            timer.Stop()
            return errTimeout
        case <-timer.C:
            timer.Stop()
            return nil
        }
    }
    return err
}

func TestGenerateID(t *testing.T) {
    clfy := &Clarify{
        Log:          testutil.Logger{},
        IDTags:       []string{"tag1", "tag2"},
        ClarifyIDTag: "clarify_input_id",
    }
    var idTests = []struct {
        inMetric telegraf.Metric
        outID    []string
        err      error
    }{
        {
            testutil.MustMetric(
                "cpu+='''..2!@#$abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890",
                map[string]string{
                    "tag1": "78sx",
                },
                map[string]interface{}{
                    "time_idle": math.NaN(),
                },
                time.Now()),
            []string{"cpu.time_idle.78sx"},
            errIDTooLong,
        },
        {
            testutil.MustMetric(
                "cpu@@",
                map[string]string{
                    "tag1": "78sx",
                    "tag2": "33t2",
                },
                map[string]interface{}{
                    "time_idle": math.NaN(),
                },
                time.Now()),
            []string{"cpu__.time_idle.78sx.33t2"},
            nil,
        },
        {
            testutil.MustMetric(
                "temperature",
                map[string]string{},
                map[string]interface{}{
                    "cpu1": 12,
                    "cpu2": 13,
                },
                time.Now()),
            []string{"temperature.cpu1", "temperature.cpu2"},
            nil,
        },
        {
            testutil.MustMetric(
                "legacy_measurement",
                map[string]string{
                    "clarify_input_id": "e5e82f63-3700-4997-835d-eb366b7294a2",
                    "xid":              "78sx",
                },
                map[string]interface{}{
                    "value": 1337,
                },
                time.Now()),
            []string{"e5e82f63-3700-4997-835d-eb366b7294a2"},
            nil,
        },
    }
    for _, tt := range idTests {
        for n, f := range tt.inMetric.FieldList() {
            id, err := clfy.generateID(tt.inMetric, f)
            if tt.err != nil {
                require.ErrorIs(t, err, tt.err)
            } else {
                require.NoError(t, err)
                require.True(t, slices.Contains(tt.outID, id), "\nexpected %+v\ngot %+v\n", tt.outID[n], id)
            }
        }
    }
}

func TestProcessMetrics(t *testing.T) {
    clfy := &Clarify{
        Log:          testutil.Logger{},
        IDTags:       []string{"tag1", "tag2", "node_id"},
        ClarifyIDTag: "clarify_input_id",
    }
    var idTests = []struct {
        inMetric   telegraf.Metric
        outFrame   views.DataFrame
        outSignals map[string]views.SignalSave
    }{
        {
            testutil.MustMetric(
                "cpu1",
                map[string]string{
                    "tag1": "78sx",
                },
                map[string]interface{}{
                    "time_idle": 1337.3,
                },
                time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)),
            views.DataFrame{
                "cpu1.time_idle.78sx": views.DataSeries{
                    1257894000000000: 1337.3,
                },
            },
            map[string]views.SignalSave{
                "cpu1.time_idle.78sx": {
                    SignalSaveAttributes: views.SignalSaveAttributes{
                        Name: "cpu1.time_idle",
                        Labels: map[string][]string{
                            "tag1": {"78sx"},
                        },
                    },
                },
            },
        },
        {
            testutil.MustMetric(
                "cpu2",
                map[string]string{
                    "tag1": "78sx",
                    "tag2": "33t2",
                },
                map[string]interface{}{
                    "time_idle": 200,
                },
                time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)),
            views.DataFrame{
                "cpu2.time_idle.78sx.33t2": views.DataSeries{
                    1257894000000000: 200,
                },
            },
            map[string]views.SignalSave{
                "cpu2.time_idle.78sx.33t2": {
                    SignalSaveAttributes: views.SignalSaveAttributes{
                        Name: "cpu2.time_idle",
                        Labels: map[string][]string{
                            "tag1": {"78sx"},
                            "tag2": {"33t2"},
                        },
                    },
                },
            },
        },
        {
            testutil.MustMetric(
                "temperature",
                map[string]string{},
                map[string]interface{}{
                    "cpu1": 12,
                    "cpu2": 13,
                },
                time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)),
            views.DataFrame{
                "temperature.cpu1": views.DataSeries{
                    1257894000000000: 12,
                },
                "temperature.cpu2": views.DataSeries{
                    1257894000000000: 13,
                },
            },
            map[string]views.SignalSave{
                "temperature.cpu1": {
                    SignalSaveAttributes: views.SignalSaveAttributes{
                        Name: "temperature.cpu1",
                    },
                },
                "temperature.cpu2": {
                    SignalSaveAttributes: views.SignalSaveAttributes{
                        Name: "temperature.cpu2",
                    },
                },
            },
        },
        {
            testutil.MustMetric(
                "legacy_measurement",
                map[string]string{
                    "clarify_input_id": "e5e82f63-3700-4997-835d-eb366b7294a2",
                    "xid":              "78sx",
                },
                map[string]interface{}{
                    "value": 123.333,
                },
                time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)),
            views.DataFrame{
                "e5e82f63-3700-4997-835d-eb366b7294a2": views.DataSeries{
                    1257894000000000: 123.333,
                },
            },
            map[string]views.SignalSave{
                "e5e82f63-3700-4997-835d-eb366b7294a2": {
                    SignalSaveAttributes: views.SignalSaveAttributes{
                        Name: "legacy_measurement.value",
                        Labels: map[string][]string{
                            "clarify-input-id": {"e5e82f63-3700-4997-835d-eb366b7294a2"},
                            "xid":              {"78sx"},
                        },
                    },
                },
            },
        },
        {
            testutil.MustMetric(
                "opc_metric",
                map[string]string{
                    "node_id": "ns=1;s=Omron PLC.Objects.new_Controller_0.GlobalVars.counter1",
                },
                map[string]interface{}{
                    "value":   12345.6789,
                    "quality": "GOOD",
                },
                time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)),
            views.DataFrame{
                "opc_metric.value.ns_1_s_Omron_PLC.Objects.new_Controller_0.GlobalVars.counter1": views.DataSeries{
                    1257894000000000: 12345.6789,
                },
            },
            map[string]views.SignalSave{
                "opc_metric.value.ns_1_s_Omron_PLC.Objects.new_Controller_0.GlobalVars.counter1": {
                    SignalSaveAttributes: views.SignalSaveAttributes{
                        Name: "opc_metric.value",
                        Labels: map[string][]string{
                            "node-id": {"ns=1;s=Omron PLC.Objects.new_Controller_0.GlobalVars.counter1"},
                        },
                    },
                },
            },
        },
    }
    for _, tt := range idTests {
        of, os := clfy.processMetrics([]telegraf.Metric{tt.inMetric})
        require.EqualValues(t, tt.outFrame, of)
        require.EqualValues(t, tt.outSignals, os)
    }
}

func TestTimeout(t *testing.T) {
    clfy := &Clarify{
        Log:     testutil.Logger{},
        Timeout: config.Duration(1 * time.Millisecond),
        client: clarify.NewClient("c8bvu9fqfsjctpv7b6fg", &MockHandler{
            sleep:      6 * time.Millisecond,
            jsonResult: validResponse,
        }),
    }

    err := clfy.Write(nil)
    require.ErrorIs(t, err, errTimeout)
}

func TestInit(t *testing.T) {
    username := config.NewSecret([]byte("user"))

    clfy := &Clarify{
        Log:     testutil.Logger{},
        Timeout: config.Duration(1 * time.Millisecond),
        client: clarify.NewClient("c8bvu9fqfsjctpv7b6fg", &MockHandler{
            sleep:      6 * time.Millisecond,
            jsonResult: validResponse,
        }),
        Username:        username,
        CredentialsFile: "file",
    }
    require.ErrorIs(t, clfy.Init(), errCredentials)
}
15
plugins/outputs/clarify/sample.conf
Normal file
@@ -0,0 +1,15 @@
## Configuration to publish Telegraf metrics to Clarify
[[outputs.clarify]]
  ## Credentials File (Oauth 2.0 from Clarify integration)
  credentials_file = "/path/to/clarify/credentials.json"

  ## Clarify username password (Basic Auth from Clarify integration)
  username = "i-am-bob"
  password = "secret-password"

  ## Timeout for Clarify operations
  # timeout = "20s"

  ## Optional tags to be included when generating the unique ID for a signal in Clarify
  # id_tags = []
  # clarify_id_tag = 'clarify_input_id'
82
plugins/outputs/cloud_pubsub/README.md
Normal file
@@ -0,0 +1,82 @@
# Google Cloud PubSub Output Plugin

This plugin publishes metrics to a [Google Cloud PubSub][pubsub] topic in one
of the supported [data formats][data_formats].

⭐ Telegraf v1.10.0
🏷️ cloud, messaging
💻 all

## Global configuration options <!-- @/docs/includes/plugin_config.md -->

In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used to
modify metrics, tags, and fields or create aliases and configure ordering, etc.
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.

[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins

## Configuration

```toml @sample.conf
# Publish Telegraf metrics to a Google Cloud PubSub topic
[[outputs.cloud_pubsub]]
  ## Required. Name of Google Cloud Platform (GCP) Project that owns
  ## the given PubSub topic.
  project = "my-project"

  ## Required. Name of PubSub topic to publish metrics to.
  topic = "my-topic"

  ## Content encoding for message payloads, can be set to "gzip" or
  ## "identity" to apply no encoding.
  # content_encoding = "identity"

  ## Required. Data format to consume.
  ## Each data format has its own unique set of configuration options.
  ## Read more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "influx"

  ## Optional. Filepath for GCP credentials JSON file to authorize calls to
  ## PubSub APIs. If not set explicitly, Telegraf will attempt to use
  ## Application Default Credentials, which is preferred.
  # credentials_file = "path/to/my/creds.json"

  ## Optional. If true, will send all metrics per write in one PubSub message.
  # send_batched = true

  ## The following publish_* parameters specifically configure batching
  ## requests made to the GCP Cloud PubSub API via the PubSub Golang library. Read
  ## more here: https://godoc.org/cloud.google.com/go/pubsub#PublishSettings

  ## Optional. Send a request to PubSub (i.e. actually publish a batch)
  ## when it has this many PubSub messages. If send_batched is true,
  ## this is ignored and treated as if it were 1.
  # publish_count_threshold = 1000

  ## Optional. Send a request to PubSub (i.e. actually publish a batch)
  ## when the outstanding messages total this many bytes. If send_batched is
  ## true, this is ignored and treated as if it were 1
  # publish_byte_threshold = 1000000

  ## Optional. Specifically configures requests made to the PubSub API.
  # publish_num_go_routines = 2

  ## Optional. Specifies a timeout for requests to the PubSub API.
  # publish_timeout = "30s"

  ## Optional. If true, published PubSub message data will be base64-encoded.
  # base64_data = false

  ## NOTE: Due to the way TOML is parsed, tables must be at the END of the
  ## plugin definition, otherwise additional config options are read as part of
  ## the table

  ## Optional. PubSub attributes to add to metrics.
  # [outputs.cloud_pubsub.attributes]
  #   my_attr = "tag_value"
```
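
For reference, the `publish_*` options above roughly correspond to fields of
`pubsub.PublishSettings` in the Go client library. The following minimal sketch
(assuming a project `my-project` and an existing topic `my-topic`; both names
are placeholders) shows how those settings would be applied when publishing
directly with the library:

```go
package main

import (
    "context"
    "fmt"
    "time"

    "cloud.google.com/go/pubsub"
)

func main() {
    ctx := context.Background()

    // Project and topic names are assumptions for this example.
    client, err := pubsub.NewClient(ctx, "my-project")
    if err != nil {
        panic(err)
    }
    defer client.Close()

    topic := client.Topic("my-topic")
    // Roughly what publish_count_threshold, publish_byte_threshold,
    // publish_num_go_routines and publish_timeout tune under the hood.
    topic.PublishSettings = pubsub.PublishSettings{
        CountThreshold: 1000,
        ByteThreshold:  1000000,
        NumGoroutines:  2,
        Timeout:        30 * time.Second,
    }

    res := topic.Publish(ctx, &pubsub.Message{Data: []byte("cpu usage_idle=98.2")})
    id, err := res.Get(ctx) // blocks until the message has been sent or failed
    if err != nil {
        panic(err)
    }
    fmt.Println("published message", id)
}
```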

[pubsub]: https://cloud.google.com/pubsub
[data_formats]: /docs/DATA_FORMATS_OUTPUT.md
Some files were not shown because too many files have changed in this diff.