Adding upstream version 1.34.4.
Signed-off-by: Daniel Baumann <daniel@debian.org>
This commit is contained in:
parent
e393c3af3f
commit
4978089aab
4963 changed files with 677545 additions and 0 deletions
58
plugins/outputs/event_hubs/README.md
Normal file
58
plugins/outputs/event_hubs/README.md
Normal file
|
@ -0,0 +1,58 @@
|
|||
# Azure Event Hubs Output Plugin
|
||||
|
||||
This plugin writes metrics to the [Azure Event Hubs][event_hubs] service in any
|
||||
of the supported [data formats][data_formats]. Metrics are sent as batches with
|
||||
each message payload containing one metric object, preferably as JSON as this
|
||||
eases integration with downstream components.
|
||||
|
||||
Each batch is sent to a single Event Hub within a namespace. In case no
|
||||
partition key is specified the batches will be automatically load-balanced
|
||||
(round-robin) across all the Event Hub partitions.
|
||||
|
||||
⭐ Telegraf v1.21.0
|
||||
🏷️ cloud,datastore
|
||||
💻 all
|
||||
|
||||
[event_hubs]: https://azure.microsoft.com/en-gb/services/event-hubs/
|
||||
[data_formats]: /docs/DATA_FORMATS_OUTPUT.md
|
||||
|
||||
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
|
||||
|
||||
In addition to the plugin-specific configuration settings, plugins support
|
||||
additional global and plugin configuration settings. These settings are used to
|
||||
modify metrics, tags, and field or create aliases and configure ordering, etc.
|
||||
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
|
||||
|
||||
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
|
||||
|
||||
## Configuration
|
||||
|
||||
```toml @sample.conf
|
||||
# Configuration for Event Hubs output plugin
|
||||
[[outputs.event_hubs]]
|
||||
## Full connection string to the Event Hub instance. The shared access key
|
||||
## must have "Send" permissions on the target Event Hub.
|
||||
connection_string = "Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName"
|
||||
|
||||
## Partition key to use for the event
|
||||
## Metric tag or field name to use for the event partition key. The value of
|
||||
## this tag or field is set as the key for events if it exists. If both, tag
|
||||
## and field, exist the tag is preferred.
|
||||
# partition_key = ""
|
||||
|
||||
## Set the maximum batch message size in bytes
|
||||
## The allowable size depends on the Event Hub tier, see
|
||||
## https://learn.microsoft.com/azure/event-hubs/event-hubs-quotas#basic-vs-standard-vs-premium-vs-dedicated-tiers
|
||||
## for details. If unset the default size defined by Azure Event Hubs is
|
||||
## used (currently 1,000,000 bytes)
|
||||
# max_message_size = "1MB"
|
||||
|
||||
## Timeout for sending the data
|
||||
# timeout = "30s"
|
||||
|
||||
## Data format to output.
|
||||
## Each data format has its own unique set of configuration options, read
|
||||
## more about them here:
|
||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
|
||||
data_format = "json"
|
||||
```
|
165
plugins/outputs/event_hubs/event_hubs.go
Normal file
165
plugins/outputs/event_hubs/event_hubs.go
Normal file
|
@ -0,0 +1,165 @@
|
|||
//go:generate ../../../tools/readme_config_includer/generator
|
||||
package event_hubs
|
||||
|
||||
import (
|
||||
"context"
|
||||
_ "embed"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/config"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
"github.com/influxdata/telegraf/plugins/outputs"
|
||||
)
|
||||
|
||||
//go:embed sample.conf
|
||||
var sampleConfig string
|
||||
|
||||
// EventHubs is the Azure Event Hubs output plugin. It serializes metrics
// and sends them as batched events to a single Event Hub instance.
type EventHubs struct {
	// ConnectionString is the full connection string of the Event Hub
	// instance, including the EntityPath of the target hub.
	ConnectionString string `toml:"connection_string"`
	// PartitionKey names a metric tag or field whose value is used as the
	// event partition key; if both exist, the tag is preferred (see Write).
	PartitionKey string `toml:"partition_key"`
	// MaxMessageSize caps the batch message size in bytes; values <= 0
	// keep the SDK default (see Init).
	MaxMessageSize config.Size `toml:"max_message_size"`
	// Timeout bounds send and close operations.
	Timeout config.Duration `toml:"timeout"`
	Log     telegraf.Logger `toml:"-"`

	// client is the producer created in Connect.
	client *azeventhubs.ProducerClient
	// options holds the batch creation defaults derived from the config.
	options azeventhubs.EventDataBatchOptions
	// serializer converts metrics into event payloads (see SetSerializer).
	serializer telegraf.Serializer
}
|
||||
|
||||
// SampleConfig returns the embedded sample configuration of the plugin.
func (*EventHubs) SampleConfig() string {
	return sampleConfig
}
|
||||
|
||||
func (e *EventHubs) Init() error {
|
||||
if e.MaxMessageSize > 0 {
|
||||
e.options.MaxBytes = uint64(e.MaxMessageSize)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *EventHubs) Connect() error {
|
||||
cfg := &azeventhubs.ProducerClientOptions{
|
||||
ApplicationID: internal.FormatFullVersion(),
|
||||
RetryOptions: azeventhubs.RetryOptions{MaxRetries: -1},
|
||||
}
|
||||
|
||||
client, err := azeventhubs.NewProducerClientFromConnectionString(e.ConnectionString, "", cfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create client: %w", err)
|
||||
}
|
||||
e.client = client
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *EventHubs) Close() error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(e.Timeout))
|
||||
defer cancel()
|
||||
|
||||
return e.client.Close(ctx)
|
||||
}
|
||||
|
||||
// SetSerializer stores the serializer used to convert metrics into the
// event payloads sent by Write.
func (e *EventHubs) SetSerializer(serializer telegraf.Serializer) {
	e.serializer = serializer
}
|
||||
|
||||
func (e *EventHubs) Write(metrics []telegraf.Metric) error {
|
||||
ctx := context.Background()
|
||||
|
||||
batchOptions := e.options
|
||||
batches := make(map[string]*azeventhubs.EventDataBatch)
|
||||
for i := 0; i < len(metrics); i++ {
|
||||
m := metrics[i]
|
||||
|
||||
// Prepare the payload
|
||||
payload, err := e.serializer.Serialize(m)
|
||||
if err != nil {
|
||||
e.Log.Errorf("Could not serialize metric: %v", err)
|
||||
e.Log.Tracef("metric: %+v", m)
|
||||
continue
|
||||
}
|
||||
|
||||
// Get the batcher for the chosen partition
|
||||
partition := "<default>"
|
||||
batchOptions.PartitionKey = nil
|
||||
if e.PartitionKey != "" {
|
||||
if key, ok := m.GetTag(e.PartitionKey); ok {
|
||||
partition = key
|
||||
batchOptions.PartitionKey = &partition
|
||||
} else if key, ok := m.GetField(e.PartitionKey); ok {
|
||||
if k, ok := key.(string); ok {
|
||||
partition = k
|
||||
batchOptions.PartitionKey = &partition
|
||||
}
|
||||
}
|
||||
}
|
||||
if _, found := batches[partition]; !found {
|
||||
batches[partition], err = e.client.NewEventDataBatch(ctx, &batchOptions)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating batch for partition %q failed: %w", partition, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Add the event to the partition and send it if the batch is full
|
||||
err = batches[partition].AddEventData(&azeventhubs.EventData{Body: payload}, nil)
|
||||
if err == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// If the event doesn't fit into the batch anymore, send the batch
|
||||
if !errors.Is(err, azeventhubs.ErrEventDataTooLarge) {
|
||||
return fmt.Errorf("adding metric to batch for partition %q failed: %w", partition, err)
|
||||
}
|
||||
|
||||
// The event is larger than the maximum allowed size so there
|
||||
// is nothing we can do here but have to drop the metric.
|
||||
if batches[partition].NumEvents() == 0 {
|
||||
e.Log.Errorf("Metric with %d bytes exceeds the maximum allowed size and must be dropped!", len(payload))
|
||||
e.Log.Tracef("metric: %+v", m)
|
||||
continue
|
||||
}
|
||||
if err := e.send(batches[partition]); err != nil {
|
||||
return fmt.Errorf("sending batch for partition %q failed: %w", partition, err)
|
||||
}
|
||||
|
||||
// Create a new metric and reiterate over the current metric to be
|
||||
// added in the next iteration of the for loop.
|
||||
batches[partition], err = e.client.NewEventDataBatch(ctx, &e.options)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating batch for partition %q failed: %w", partition, err)
|
||||
}
|
||||
i--
|
||||
}
|
||||
|
||||
// Send the remaining batches that never exceeded the batch size
|
||||
for partition, batch := range batches {
|
||||
if batch.NumBytes() == 0 {
|
||||
continue
|
||||
}
|
||||
if err := e.send(batch); err != nil {
|
||||
return fmt.Errorf("sending batch for partition %q failed: %w", partition, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *EventHubs) send(batch *azeventhubs.EventDataBatch) error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(e.Timeout))
|
||||
defer cancel()
|
||||
|
||||
return e.client.SendEventDataBatch(ctx, batch, nil)
|
||||
}
|
||||
|
||||
func init() {
|
||||
outputs.Add("event_hubs", func() telegraf.Output {
|
||||
return &EventHubs{
|
||||
Timeout: config.Duration(30 * time.Second),
|
||||
}
|
||||
})
|
||||
}
|
249
plugins/outputs/event_hubs/event_hubs_test.go
Normal file
249
plugins/outputs/event_hubs/event_hubs_test.go
Normal file
|
@ -0,0 +1,249 @@
|
|||
package event_hubs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/testcontainers/testcontainers-go"
|
||||
"github.com/testcontainers/testcontainers-go/modules/azure/eventhubs"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/config"
|
||||
"github.com/influxdata/telegraf/metric"
|
||||
"github.com/influxdata/telegraf/plugins/serializers/json"
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
)
|
||||
|
||||
func TestEmulatorIntegration(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping integration test in short mode")
|
||||
}
|
||||
|
||||
// Require the developers to explicitly accept the EULA of the emulator
|
||||
if os.Getenv("AZURE_EVENT_HUBS_EMULATOR_ACCEPT_EULA") != "yes" {
|
||||
t.Skip(`
|
||||
Skipping due to unexcepted EULA. To run this test, please check the EULA of the emulator
|
||||
at https://github.com/Azure/azure-event-hubs-emulator-installer/blob/main/EMULATOR_EULA.md
|
||||
and accept it by setting the environment variable AZURE_EVENT_HUBS_EMULATOR_ACCEPT_EULA
|
||||
to 'yes'.
|
||||
`)
|
||||
}
|
||||
|
||||
// Load the configuration for the Event-Hubs instance
|
||||
emulatorConfig, err := os.ReadFile(filepath.Join("testdata", "Config.json"))
|
||||
require.NoError(t, err, "reading config failed")
|
||||
|
||||
// Setup the Azure Event Hub emulator environment
|
||||
// See https://learn.microsoft.com/en-us/azure/event-hubs/test-locally-with-event-hub-emulator
|
||||
emulator, err := eventhubs.Run(
|
||||
t.Context(),
|
||||
"mcr.microsoft.com/azure-messaging/eventhubs-emulator:2.1.0",
|
||||
eventhubs.WithAcceptEULA(),
|
||||
eventhubs.WithConfig(bytes.NewReader(emulatorConfig)),
|
||||
)
|
||||
require.NoError(t, err, "failed to start container")
|
||||
defer emulator.Terminate(t.Context()) //nolint:errcheck // Can't do anything anyway
|
||||
|
||||
conn, err := emulator.ConnectionString(t.Context())
|
||||
require.NoError(t, err, "getting connection string failed")
|
||||
conn += "EntityPath=test"
|
||||
|
||||
// Setup plugin and connect
|
||||
serializer := &json.Serializer{}
|
||||
require.NoError(t, serializer.Init())
|
||||
|
||||
plugin := &EventHubs{
|
||||
ConnectionString: conn,
|
||||
Timeout: config.Duration(3 * time.Second),
|
||||
Log: testutil.Logger{},
|
||||
}
|
||||
plugin.SetSerializer(serializer)
|
||||
require.NoError(t, plugin.Init())
|
||||
require.NoError(t, plugin.Connect())
|
||||
defer plugin.Close()
|
||||
|
||||
// Make sure we are connected
|
||||
require.Eventually(t, func() bool {
|
||||
return plugin.Write(testutil.MockMetrics()) == nil
|
||||
}, 3*time.Second, 500*time.Millisecond)
|
||||
|
||||
input := []telegraf.Metric{
|
||||
metric.New(
|
||||
"test",
|
||||
map[string]string{
|
||||
"source": "foo",
|
||||
"division": "A",
|
||||
"type": "temperature",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"value": 23,
|
||||
},
|
||||
time.Unix(0, 0),
|
||||
),
|
||||
metric.New(
|
||||
"test",
|
||||
map[string]string{
|
||||
"source": "foo",
|
||||
"division": "A",
|
||||
"type": "humidity",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"value": 59,
|
||||
},
|
||||
time.Unix(0, 0),
|
||||
),
|
||||
metric.New(
|
||||
"test",
|
||||
map[string]string{
|
||||
"source": "bar",
|
||||
"division": "B",
|
||||
"type": "temperature",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"value": 42,
|
||||
},
|
||||
time.Unix(0, 0),
|
||||
),
|
||||
metric.New(
|
||||
"test",
|
||||
map[string]string{
|
||||
"source": "bar",
|
||||
"division": "B",
|
||||
"type": "humidity",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"value": 87,
|
||||
},
|
||||
time.Unix(0, 0),
|
||||
),
|
||||
}
|
||||
|
||||
require.NoError(t, plugin.Write(input))
|
||||
}
|
||||
|
||||
func TestReconnectIntegration(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping integration test in short mode")
|
||||
}
|
||||
|
||||
// Require the developers to explicitly accept the EULA of the emulator
|
||||
if os.Getenv("AZURE_EVENT_HUBS_EMULATOR_ACCEPT_EULA") != "yes" {
|
||||
t.Skip(`
|
||||
Skipping due to unexcepted EULA. To run this test, please check the EULA of the emulator
|
||||
at https://github.com/Azure/azure-event-hubs-emulator-installer/blob/main/EMULATOR_EULA.md
|
||||
and accept it by setting the environment variable AZURE_EVENT_HUBS_EMULATOR_ACCEPT_EULA
|
||||
to 'yes'.
|
||||
`)
|
||||
}
|
||||
|
||||
// Load the configuration for the Event-Hubs instance
|
||||
emulatorConfig, err := os.ReadFile(filepath.Join("testdata", "Config.json"))
|
||||
require.NoError(t, err, "reading config failed")
|
||||
|
||||
// Setup the Azure Event Hub emulator environment
|
||||
// See https://learn.microsoft.com/en-us/azure/event-hubs/test-locally-with-event-hub-emulator
|
||||
emulator, err := eventhubs.Run(
|
||||
t.Context(),
|
||||
"mcr.microsoft.com/azure-messaging/eventhubs-emulator:2.1.0",
|
||||
eventhubs.WithAcceptEULA(),
|
||||
eventhubs.WithConfig(bytes.NewReader(emulatorConfig)),
|
||||
)
|
||||
require.NoError(t, err, "failed to start container")
|
||||
defer emulator.Terminate(t.Context()) //nolint:errcheck // Can't do anything anyway
|
||||
|
||||
conn, err := emulator.ConnectionString(t.Context())
|
||||
require.NoError(t, err, "getting connection string failed")
|
||||
conn += "EntityPath=test"
|
||||
|
||||
// Setup plugin and connect
|
||||
serializer := &json.Serializer{}
|
||||
require.NoError(t, serializer.Init())
|
||||
|
||||
plugin := &EventHubs{
|
||||
ConnectionString: conn,
|
||||
Timeout: config.Duration(3 * time.Second),
|
||||
Log: testutil.Logger{},
|
||||
}
|
||||
plugin.SetSerializer(serializer)
|
||||
require.NoError(t, plugin.Init())
|
||||
require.NoError(t, plugin.Connect())
|
||||
defer plugin.Close()
|
||||
|
||||
// Make sure we are connected
|
||||
require.Eventually(t, func() bool {
|
||||
return plugin.Write(testutil.MockMetrics()) == nil
|
||||
}, 3*time.Second, 500*time.Millisecond)
|
||||
|
||||
input := []telegraf.Metric{
|
||||
metric.New(
|
||||
"test",
|
||||
map[string]string{
|
||||
"source": "foo",
|
||||
"division": "A",
|
||||
"type": "temperature",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"value": 23,
|
||||
},
|
||||
time.Unix(0, 0),
|
||||
),
|
||||
metric.New(
|
||||
"test",
|
||||
map[string]string{
|
||||
"source": "foo",
|
||||
"division": "A",
|
||||
"type": "humidity",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"value": 59,
|
||||
},
|
||||
time.Unix(0, 0),
|
||||
),
|
||||
metric.New(
|
||||
"test",
|
||||
map[string]string{
|
||||
"source": "bar",
|
||||
"division": "B",
|
||||
"type": "temperature",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"value": 42,
|
||||
},
|
||||
time.Unix(0, 0),
|
||||
),
|
||||
metric.New(
|
||||
"test",
|
||||
map[string]string{
|
||||
"source": "bar",
|
||||
"division": "B",
|
||||
"type": "humidity",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"value": 87,
|
||||
},
|
||||
time.Unix(0, 0),
|
||||
),
|
||||
}
|
||||
|
||||
// This write should succeed as we should be able to connect to the
|
||||
// container
|
||||
require.NoError(t, plugin.Write(input))
|
||||
|
||||
// Instantiate a docker client to be able to pause/resume the container
|
||||
client, err := testcontainers.NewDockerClientWithOpts(t.Context())
|
||||
require.NoError(t, err, "creating docker client failed")
|
||||
|
||||
// Pause the container to simulate connection loss. Subsequent writes
|
||||
// should fail until the container is resumed
|
||||
require.NoError(t, client.ContainerPause(t.Context(), emulator.GetContainerID()))
|
||||
require.ErrorIs(t, plugin.Write(input), context.DeadlineExceeded)
|
||||
|
||||
// Resume the container to check if the plugin reconnects
|
||||
require.NoError(t, client.ContainerUnpause(t.Context(), emulator.GetContainerID()))
|
||||
require.NoError(t, plugin.Write(input))
|
||||
}
|
27
plugins/outputs/event_hubs/sample.conf
Normal file
27
plugins/outputs/event_hubs/sample.conf
Normal file
|
@ -0,0 +1,27 @@
|
|||
# Configuration for Event Hubs output plugin
|
||||
[[outputs.event_hubs]]
|
||||
## Full connection string to the Event Hub instance. The shared access key
|
||||
## must have "Send" permissions on the target Event Hub.
|
||||
connection_string = "Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName"
|
||||
|
||||
## Partition key to use for the event
|
||||
## Metric tag or field name to use for the event partition key. The value of
|
||||
## this tag or field is set as the key for events if it exists. If both, tag
|
||||
## and field, exist the tag is preferred.
|
||||
# partition_key = ""
|
||||
|
||||
## Set the maximum batch message size in bytes
|
||||
## The allowable size depends on the Event Hub tier, see
|
||||
## https://learn.microsoft.com/azure/event-hubs/event-hubs-quotas#basic-vs-standard-vs-premium-vs-dedicated-tiers
|
||||
## for details. If unset the default size defined by Azure Event Hubs is
|
||||
## used (currently 1,000,000 bytes)
|
||||
# max_message_size = "1MB"
|
||||
|
||||
## Timeout for sending the data
|
||||
# timeout = "30s"
|
||||
|
||||
## Data format to output.
|
||||
## Each data format has its own unique set of configuration options, read
|
||||
## more about them here:
|
||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
|
||||
data_format = "json"
|
24
plugins/outputs/event_hubs/testdata/Config.json
vendored
Normal file
24
plugins/outputs/event_hubs/testdata/Config.json
vendored
Normal file
|
@ -0,0 +1,24 @@
|
|||
{
|
||||
"UserConfig": {
|
||||
"NamespaceConfig": [
|
||||
{
|
||||
"Type": "EventHub",
|
||||
"Name": "emulatorNs1",
|
||||
"Entities": [
|
||||
{
|
||||
"Name": "test",
|
||||
"PartitionCount": 2,
|
||||
"ConsumerGroups": [
|
||||
{
|
||||
"Name": "cg1"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"LoggingConfig": {
|
||||
"Type": "Console"
|
||||
}
|
||||
}
|
||||
}
|
Loading…
Add table
Add a link
Reference in a new issue