Adding upstream version 1.34.4.
Signed-off-by: Daniel Baumann <daniel@debian.org>
parent e393c3af3f
commit 4978089aab

4963 changed files with 677545 additions and 0 deletions
plugins/outputs/kinesis/README.md (new file, 195 lines)

@@ -0,0 +1,195 @@
# Amazon Kinesis Output Plugin

This plugin writes metrics to an [Amazon Kinesis][kinesis] endpoint. It will
batch all points into one request to reduce the number of API requests.

Please consult [Amazon's official documentation][docs] for more details on the
Kinesis architecture and concepts.

⭐ Telegraf v0.2.5
🏷️ cloud, messaging
💻 all

[kinesis]: https://aws.amazon.com/kinesis
[docs]: http://docs.aws.amazon.com/kinesis/latest/dev/key-concepts.html

## Amazon Authentication

This plugin uses a credential chain for authentication with the Kinesis API
endpoint. The plugin will attempt to authenticate in the following order:

1. Web identity provider credentials via STS if `role_arn` and
   `web_identity_token_file` are specified
1. Assumed credentials via STS if the `role_arn` attribute is specified
   (source credentials are evaluated from subsequent rules)
1. Explicit credentials from the `access_key`, `secret_key`, and `token`
   attributes
1. Shared profile from the `profile` attribute
1. [Environment Variables][1]
1. [Shared Credentials][2]
1. [EC2 Instance Profile][3]

If you are using credentials from a web identity provider, you can specify the
session name using `role_session_name`. If left empty, the current timestamp
will be used.

[1]: https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#environment-variables
[2]: https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#shared-credentials-file
[3]: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
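As a minimal sketch of the first rule, a web-identity setup might look like
the snippet below; the role ARN, token path, and session name are illustrative
placeholders, not values shipped with the plugin:

```toml
[[outputs.kinesis]]
  region = "ap-southeast-2"
  streamname = "StreamName"

  ## Hypothetical role and token file for a web identity provider
  role_arn = "arn:aws:iam::123456789012:role/telegraf-kinesis-writer"
  web_identity_token_file = "/var/run/secrets/tokens/telegraf-token"
  role_session_name = "telegraf"
```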
## Global configuration options <!-- @/docs/includes/plugin_config.md -->

In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used
to modify metrics, tags, and fields, create aliases, and configure ordering,
etc. See [CONFIGURATION.md][CONFIGURATION.md] for more details.

[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins

## Configuration
```toml @sample.conf
# Configuration for the AWS Kinesis output.
[[outputs.kinesis]]
  ## Amazon REGION of kinesis endpoint.
  region = "ap-southeast-2"

  ## Amazon Credentials
  ## Credentials are loaded in the following order
  ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
  ## 2) Assumed credentials via STS if role_arn is specified
  ## 3) Explicit credentials from 'access_key' and 'secret_key'
  ## 4) Shared profile from 'profile'
  ## 5) Environment variables
  ## 6) Shared credentials file
  ## 7) EC2 Instance Profile
  # access_key = ""
  # secret_key = ""
  # token = ""
  # role_arn = ""
  # web_identity_token_file = ""
  # role_session_name = ""
  # profile = ""
  # shared_credential_file = ""

  ## Endpoint to make request against, the correct endpoint is automatically
  ## determined and this option should only be set if you wish to override the
  ## default.
  ## ex: endpoint_url = "http://localhost:8000"
  # endpoint_url = ""

  ## Kinesis StreamName must exist prior to starting telegraf.
  streamname = "StreamName"

  ## Data format to output.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "influx"

  ## debug will show upstream aws messages.
  debug = false

  ## NOTE: Due to the way TOML is parsed, tables must be at the END of the
  ## plugin definition, otherwise additional config options are read as part
  ## of the table

  ## The partition key can be calculated using one of several methods:
  ##
  ## Use a static value for all writes:
  # [outputs.kinesis.partition]
  #   method = "static"
  #   key = "howdy"
  #
  ## Use a random partition key on each write:
  # [outputs.kinesis.partition]
  #   method = "random"
  #
  ## Use the measurement name as the partition key:
  # [outputs.kinesis.partition]
  #   method = "measurement"
  #
  ## Use the value of a tag for all writes; if the tag is not set, the value
  ## of the 'default' option will be used. If no default is set, it falls
  ## back to "telegraf".
  # [outputs.kinesis.partition]
  #   method = "tag"
  #   key = "host"
  #   default = "mykey"
```
For this output plugin to function correctly, the following variables must be
configured:

* region
* streamname
### region

The region is the Amazon region that you wish to connect to. Examples include
but are not limited to:

* us-west-1
* us-west-2
* us-east-1
* ap-southeast-1
* ap-southeast-2
### streamname

The streamname is used by the plugin to ensure that data is sent to the
correct Kinesis stream. It is important to note that the stream *MUST* be
pre-configured for this plugin to function correctly. If the stream does not
exist, telegraf will exit with an exit code of 1.

### partitionkey [DEPRECATED]

This is used to group data within a stream. Currently this plugin only
supports a single partitionkey. Manually configuring different hosts, or
groups of hosts, with manually selected partitionkeys might be a workable
solution to scale out.

### use_random_partitionkey [DEPRECATED]

When true, a random UUID will be generated and used as the partitionkey when
sending data to Kinesis. This allows data to spread evenly across multiple
shards in the stream. Because the partition key is random, there can be no
guarantee of ordering when consuming the data off the shards. If true, the
partitionkey option will be ignored.
### partition

This is used to group data within a stream. Currently four methods are
supported: random, static, tag, or measurement.

#### random

This will generate a UUIDv4 for each metric to spread them across shards. Any
guarantee of ordering is lost with this method.

#### static

This uses a static string as the partitionkey. All metrics will be mapped to
the same shard, which may limit throughput.

#### tag

This will take the value of the specified tag from each metric as the
partitionKey. If the tag is not found, the `default` value will be used, or
`telegraf` if no default is specified.

#### measurement

This will use the measurement's name as the partitionKey.
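For instance, with the tag method configured as in the sketch below, a metric
such as `cpu,host=web01 usage=42` would be written with partition key
`web01`, while a metric without a `host` tag would fall back to `mykey` (both
values here are illustrative):

```toml
[outputs.kinesis.partition]
  method = "tag"
  key = "host"
  default = "mykey"
```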
### format

The format configuration value has been designated to allow people to change
the format of the Point as written to Kinesis. Right now there are two
supported formats: string and custom.

#### string

String is defined using the default Point.String() value and translated to
[]byte for the Kinesis stream.

#### custom

Custom is a string defined by a number of values in the FormatMetric()
function.
plugins/outputs/kinesis/kinesis.go (new file, 196 lines)

@@ -0,0 +1,196 @@
//go:generate ../../../tools/readme_config_includer/generator
package kinesis

import (
	"context"
	_ "embed"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kinesis"
	"github.com/aws/aws-sdk-go-v2/service/kinesis/types"
	"github.com/gofrs/uuid/v5"

	"github.com/influxdata/telegraf"
	common_aws "github.com/influxdata/telegraf/plugins/common/aws"
	"github.com/influxdata/telegraf/plugins/outputs"
)

//go:embed sample.conf
var sampleConfig string

// Limit set by AWS (https://docs.aws.amazon.com/kinesis/latest/APIReference/API_PutRecords.html)
const maxRecordsPerRequest uint32 = 500
type (
	KinesisOutput struct {
		StreamName         string     `toml:"streamname"`
		PartitionKey       string     `toml:"partitionkey" deprecated:"1.5.0;1.35.0;use 'partition.key' instead"`
		RandomPartitionKey bool       `toml:"use_random_partitionkey" deprecated:"1.5.0;1.35.0;use 'partition.method' instead"`
		Partition          *Partition `toml:"partition"`
		Debug              bool       `toml:"debug"`

		Log        telegraf.Logger `toml:"-"`
		serializer telegraf.Serializer
		svc        kinesisClient

		common_aws.CredentialConfig
	}

	Partition struct {
		Method  string `toml:"method"`
		Key     string `toml:"key"`
		Default string `toml:"default"`
	}
)

type kinesisClient interface {
	PutRecords(context.Context, *kinesis.PutRecordsInput, ...func(*kinesis.Options)) (*kinesis.PutRecordsOutput, error)
}
func (*KinesisOutput) SampleConfig() string {
	return sampleConfig
}

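// Connect resolves AWS credentials, optionally overrides the service
// endpoint, and verifies the configured stream exists before any writes.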
func (k *KinesisOutput) Connect() error {
	if k.Partition == nil {
		k.Log.Error("Deprecated partitionkey configuration in use, please consider using outputs.kinesis.partition")
	}

	// We first attempt to create a session to Kinesis using an IAM role; if
	// that fails it falls through to environment variables, and then shared
	// credentials.
	if k.Debug {
		k.Log.Infof("Establishing a connection to Kinesis in %s", k.Region)
	}

	cfg, err := k.CredentialConfig.Credentials()
	if err != nil {
		return err
	}

	if k.EndpointURL != "" {
		cfg.BaseEndpoint = &k.EndpointURL
	}

	svc := kinesis.NewFromConfig(cfg)

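	// Probe the stream up front; any error here fails Connect and keeps the
	// plugin from starting.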
	_, err = svc.DescribeStreamSummary(context.Background(), &kinesis.DescribeStreamSummaryInput{
		StreamName: aws.String(k.StreamName),
	})
	k.svc = svc
	return err
}

func (*KinesisOutput) Close() error {
	return nil
}

func (k *KinesisOutput) SetSerializer(serializer telegraf.Serializer) {
	k.serializer = serializer
}

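// writeKinesis sends a single PutRecords request for the given entries and
// returns the elapsed time; request errors are logged rather than returned.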
func (k *KinesisOutput) writeKinesis(r []types.PutRecordsRequestEntry) time.Duration {
	start := time.Now()
	payload := &kinesis.PutRecordsInput{
		Records:    r,
		StreamName: aws.String(k.StreamName),
	}

	resp, err := k.svc.PutRecords(context.Background(), payload)
	if err != nil {
		k.Log.Errorf("Unable to write to Kinesis: %s", err.Error())
		return time.Since(start)
	}

	if k.Debug {
		k.Log.Infof("Wrote: '%+v'", resp)
	}

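	// Records that failed within an otherwise successful request are only
	// counted and logged; the plugin does not retry them.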
	failed := *resp.FailedRecordCount
	if failed > 0 {
		k.Log.Errorf("Unable to write %+v of %+v record(s) to Kinesis", failed, len(r))
	}

	return time.Since(start)
}

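// getPartitionKey resolves the partition key for a metric, preferring the
// partition table and falling back to the deprecated top-level options.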
func (k *KinesisOutput) getPartitionKey(metric telegraf.Metric) string {
	if k.Partition != nil {
		switch k.Partition.Method {
		case "static":
			return k.Partition.Key
		case "random":
			u, err := uuid.NewV4()
			if err != nil {
				return k.Partition.Default
			}
			return u.String()
		case "measurement":
			return metric.Name()
		case "tag":
			if t, ok := metric.GetTag(k.Partition.Key); ok {
				return t
			} else if len(k.Partition.Default) > 0 {
				return k.Partition.Default
			}
			// Default partition name if default is not set
			return "telegraf"
		default:
			k.Log.Errorf("You have configured a Partition method of %q which is not supported", k.Partition.Method)
		}
	}
	if k.RandomPartitionKey {
		u, err := uuid.NewV4()
		if err != nil {
			// k.Partition is nil on this deprecated path, so fall back to the
			// static deprecated key instead of dereferencing it.
			return k.PartitionKey
		}
		return u.String()
	}
	return k.PartitionKey
}

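// Write serializes the metrics and sends them to Kinesis in batches of at
// most maxRecordsPerRequest (500) records per PutRecords call.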
func (k *KinesisOutput) Write(metrics []telegraf.Metric) error {
	var sz uint32

	if len(metrics) == 0 {
		return nil
	}

	r := make([]types.PutRecordsRequestEntry, 0, len(metrics))
	for _, metric := range metrics {
		sz++

		values, err := k.serializer.Serialize(metric)
		if err != nil {
			k.Log.Debugf("Could not serialize metric: %v", err)
			continue
		}

		partitionKey := k.getPartitionKey(metric)

		d := types.PutRecordsRequestEntry{
			Data:         values,
			PartitionKey: aws.String(partitionKey),
		}

		r = append(r, d)
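		// Flush as soon as the batch reaches the PutRecords limit, then
		// start a new batch.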
		if sz == maxRecordsPerRequest {
			elapsed := k.writeKinesis(r)
			k.Log.Debugf("Wrote a %d point batch to Kinesis in %+v.", sz, elapsed)
			sz = 0
			r = nil
		}
	}
	if sz > 0 {
		elapsed := k.writeKinesis(r)
		k.Log.Debugf("Wrote a %d point batch to Kinesis in %+v.", sz, elapsed)
	}

	return nil
}

func init() {
	outputs.Add("kinesis", func() telegraf.Output {
		return &KinesisOutput{}
	})
}
plugins/outputs/kinesis/kinesis_test.go (new file, 596 lines)

@@ -0,0 +1,596 @@
package kinesis

import (
	"context"
	"fmt"
	"testing"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kinesis"
	"github.com/aws/aws-sdk-go-v2/service/kinesis/types"
	"github.com/gofrs/uuid/v5"
	"github.com/stretchr/testify/require"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/serializers/influx"
	"github.com/influxdata/telegraf/testutil"
)

const testPartitionKey = "partitionKey"
const testShardID = "shardId-000000000003"
const testSequenceNumber = "49543463076570308322303623326179887152428262250726293588"
const testStreamName = "streamName"
const zero int64 = 0

func TestPartitionKey(t *testing.T) {
	testPoint := testutil.TestMetric(1)

	k := KinesisOutput{
		Log: testutil.Logger{},
		Partition: &Partition{
			Method: "static",
			Key:    "-",
		},
	}
	require.Equal(t, "-", k.getPartitionKey(testPoint), "PartitionKey should be '-'")

	k = KinesisOutput{
		Log: testutil.Logger{},
		Partition: &Partition{
			Method: "tag",
			Key:    "tag1",
		},
	}
	require.Equal(t, testPoint.Tags()["tag1"], k.getPartitionKey(testPoint), "PartitionKey should be value of 'tag1'")

	k = KinesisOutput{
		Log: testutil.Logger{},
		Partition: &Partition{
			Method:  "tag",
			Key:     "doesnotexist",
			Default: "somedefault",
		},
	}
	require.Equal(t, "somedefault", k.getPartitionKey(testPoint), "PartitionKey should use default")

	k = KinesisOutput{
		Log: testutil.Logger{},
		Partition: &Partition{
			Method: "tag",
			Key:    "doesnotexist",
		},
	}
	require.Equal(t, "telegraf", k.getPartitionKey(testPoint), "PartitionKey should be telegraf")

	k = KinesisOutput{
		Log: testutil.Logger{},
		Partition: &Partition{
			Method: "not supported",
		},
	}
	require.Empty(t, k.getPartitionKey(testPoint), "PartitionKey should be value of ''")

	k = KinesisOutput{
		Log: testutil.Logger{},
		Partition: &Partition{
			Method: "measurement",
		},
	}
	require.Equal(t, testPoint.Name(), k.getPartitionKey(testPoint), "PartitionKey should be value of measurement name")

	k = KinesisOutput{
		Log: testutil.Logger{},
		Partition: &Partition{
			Method: "random",
		},
	}
	partitionKey := k.getPartitionKey(testPoint)
	u, err := uuid.FromString(partitionKey)
	require.NoError(t, err, "Issue parsing UUID")
	require.Equal(t, byte(4), u.Version(), "PartitionKey should be UUIDv4")

	k = KinesisOutput{
		Log:          testutil.Logger{},
		PartitionKey: "-",
	}
	require.Equal(t, "-", k.getPartitionKey(testPoint), "PartitionKey should be '-'")

	k = KinesisOutput{
		Log:                testutil.Logger{},
		RandomPartitionKey: true,
	}
	partitionKey = k.getPartitionKey(testPoint)
	u, err = uuid.FromString(partitionKey)
	require.NoError(t, err, "Issue parsing UUID")
	require.Equal(t, byte(4), u.Version(), "PartitionKey should be UUIDv4")
}

func TestWriteKinesis_WhenSuccess(t *testing.T) {
	records := []types.PutRecordsRequestEntry{
		{
			PartitionKey: aws.String(testPartitionKey),
			Data:         []byte{0x65},
		},
	}

	svc := &mockKinesisPutRecords{}
	svc.SetupResponse(
		0,
		[]types.PutRecordsResultEntry{
			{
				SequenceNumber: aws.String(testSequenceNumber),
				ShardId:        aws.String(testShardID),
			},
		},
	)

	k := KinesisOutput{
		Log:        testutil.Logger{},
		StreamName: testStreamName,
		svc:        svc,
	}

	elapsed := k.writeKinesis(records)
	require.GreaterOrEqual(t, elapsed.Nanoseconds(), zero)

	svc.AssertRequests(t, []*kinesis.PutRecordsInput{
		{
			StreamName: aws.String(testStreamName),
			Records:    records,
		},
	})
}

func TestWriteKinesis_WhenRecordErrors(t *testing.T) {
	records := []types.PutRecordsRequestEntry{
		{
			PartitionKey: aws.String(testPartitionKey),
			Data:         []byte{0x66},
		},
	}

	svc := &mockKinesisPutRecords{}
	svc.SetupResponse(
		1,
		[]types.PutRecordsResultEntry{
			{
				ErrorCode:    aws.String("InternalFailure"),
				ErrorMessage: aws.String("Internal Service Failure"),
			},
		},
	)

	k := KinesisOutput{
		Log:        testutil.Logger{},
		StreamName: testStreamName,
		svc:        svc,
	}

	elapsed := k.writeKinesis(records)
	require.GreaterOrEqual(t, elapsed.Nanoseconds(), zero)

	svc.AssertRequests(t, []*kinesis.PutRecordsInput{
		{
			StreamName: aws.String(testStreamName),
			Records:    records,
		},
	})
}

func TestWriteKinesis_WhenServiceError(t *testing.T) {
	records := []types.PutRecordsRequestEntry{
		{
			PartitionKey: aws.String(testPartitionKey),
		},
	}

	svc := &mockKinesisPutRecords{}
	svc.SetupErrorResponse(
		&types.InvalidArgumentException{Message: aws.String("Invalid record")},
	)

	k := KinesisOutput{
		Log:        testutil.Logger{},
		StreamName: testStreamName,
		svc:        svc,
	}

	elapsed := k.writeKinesis(records)
	require.GreaterOrEqual(t, elapsed.Nanoseconds(), zero)

	svc.AssertRequests(t, []*kinesis.PutRecordsInput{
		{
			StreamName: aws.String(testStreamName),
			Records:    records,
		},
	})
}

func TestWrite_NoMetrics(t *testing.T) {
	serializer := &influx.Serializer{}
	require.NoError(t, serializer.Init())

	svc := &mockKinesisPutRecords{}

	k := KinesisOutput{
		Log: testutil.Logger{},
		Partition: &Partition{
			Method: "static",
			Key:    "partitionKey",
		},
		StreamName: "stream",
		serializer: serializer,
		svc:        svc,
	}

	err := k.Write(nil)
	require.NoError(t, err, "Should not return error")

	svc.AssertRequests(t, make([]*kinesis.PutRecordsInput, 0))
}

func TestWrite_SingleMetric(t *testing.T) {
	serializer := &influx.Serializer{}
	require.NoError(t, serializer.Init())

	svc := &mockKinesisPutRecords{}
	svc.SetupGenericResponse(1, 0)

	k := KinesisOutput{
		Log: testutil.Logger{},
		Partition: &Partition{
			Method: "static",
			Key:    testPartitionKey,
		},
		StreamName: testStreamName,
		serializer: serializer,
		svc:        svc,
	}

	metric, metricData := createTestMetric(t, "metric1", serializer)
	err := k.Write([]telegraf.Metric{metric})
	require.NoError(t, err, "Should not return error")

	svc.AssertRequests(t, []*kinesis.PutRecordsInput{
		{
			StreamName: aws.String(testStreamName),
			Records: []types.PutRecordsRequestEntry{
				{
					PartitionKey: aws.String(testPartitionKey),
					Data:         metricData,
				},
			},
		},
	})
}

func TestWrite_MultipleMetrics_SinglePartialRequest(t *testing.T) {
	serializer := &influx.Serializer{}
	require.NoError(t, serializer.Init())

	svc := &mockKinesisPutRecords{}
	svc.SetupGenericResponse(3, 0)

	k := KinesisOutput{
		Log: testutil.Logger{},
		Partition: &Partition{
			Method: "static",
			Key:    testPartitionKey,
		},
		StreamName: testStreamName,
		serializer: serializer,
		svc:        svc,
	}

	metrics, metricsData := createTestMetrics(t, 3, serializer)
	err := k.Write(metrics)
	require.NoError(t, err, "Should not return error")

	svc.AssertRequests(t, []*kinesis.PutRecordsInput{
		{
			StreamName: aws.String(testStreamName),
			Records: createPutRecordsRequestEntries(
				metricsData,
			),
		},
	})
}

func TestWrite_MultipleMetrics_SingleFullRequest(t *testing.T) {
	serializer := &influx.Serializer{}
	require.NoError(t, serializer.Init())

	svc := &mockKinesisPutRecords{}
	svc.SetupGenericResponse(maxRecordsPerRequest, 0)

	k := KinesisOutput{
		Log: testutil.Logger{},
		Partition: &Partition{
			Method: "static",
			Key:    testPartitionKey,
		},
		StreamName: testStreamName,
		serializer: serializer,
		svc:        svc,
	}

	metrics, metricsData := createTestMetrics(t, maxRecordsPerRequest, serializer)
	err := k.Write(metrics)
	require.NoError(t, err, "Should not return error")

	svc.AssertRequests(t, []*kinesis.PutRecordsInput{
		{
			StreamName: aws.String(testStreamName),
			Records: createPutRecordsRequestEntries(
				metricsData,
			),
		},
	})
}

func TestWrite_MultipleMetrics_MultipleRequests(t *testing.T) {
	serializer := &influx.Serializer{}
	require.NoError(t, serializer.Init())

	svc := &mockKinesisPutRecords{}
	svc.SetupGenericResponse(maxRecordsPerRequest, 0)
	svc.SetupGenericResponse(1, 0)

	k := KinesisOutput{
		Log: testutil.Logger{},
		Partition: &Partition{
			Method: "static",
			Key:    testPartitionKey,
		},
		StreamName: testStreamName,
		serializer: serializer,
		svc:        svc,
	}

	metrics, metricsData := createTestMetrics(t, maxRecordsPerRequest+1, serializer)
	err := k.Write(metrics)
	require.NoError(t, err, "Should not return error")

	svc.AssertRequests(t, []*kinesis.PutRecordsInput{
		{
			StreamName: aws.String(testStreamName),
			Records: createPutRecordsRequestEntries(
				metricsData[0:maxRecordsPerRequest],
			),
		},
		{
			StreamName: aws.String(testStreamName),
			Records: createPutRecordsRequestEntries(
				metricsData[maxRecordsPerRequest:],
			),
		},
	})
}

func TestWrite_MultipleMetrics_MultipleFullRequests(t *testing.T) {
	serializer := &influx.Serializer{}
	require.NoError(t, serializer.Init())

	svc := &mockKinesisPutRecords{}
	svc.SetupGenericResponse(maxRecordsPerRequest, 0)
	svc.SetupGenericResponse(maxRecordsPerRequest, 0)

	k := KinesisOutput{
		Log: testutil.Logger{},
		Partition: &Partition{
			Method: "static",
			Key:    testPartitionKey,
		},
		StreamName: testStreamName,
		serializer: serializer,
		svc:        svc,
	}

	metrics, metricsData := createTestMetrics(t, maxRecordsPerRequest*2, serializer)
	err := k.Write(metrics)
	require.NoError(t, err, "Should not return error")

	svc.AssertRequests(t, []*kinesis.PutRecordsInput{
		{
			StreamName: aws.String(testStreamName),
			Records: createPutRecordsRequestEntries(
				metricsData[0:maxRecordsPerRequest],
			),
		},
		{
			StreamName: aws.String(testStreamName),
			Records: createPutRecordsRequestEntries(
				metricsData[maxRecordsPerRequest:],
			),
		},
	})
}

func TestWrite_SerializerError(t *testing.T) {
	serializer := &influx.Serializer{}
	require.NoError(t, serializer.Init())

	svc := &mockKinesisPutRecords{}
	svc.SetupGenericResponse(2, 0)

	k := KinesisOutput{
		Log: testutil.Logger{},
		Partition: &Partition{
			Method: "static",
			Key:    testPartitionKey,
		},
		StreamName: testStreamName,
		serializer: serializer,
		svc:        svc,
	}

	metric1, metric1Data := createTestMetric(t, "metric1", serializer)
	metric2, metric2Data := createTestMetric(t, "metric2", serializer)

	// metric is invalid because of empty name
	invalidMetric := testutil.TestMetric(3, "")

	err := k.Write([]telegraf.Metric{
		metric1,
		invalidMetric,
		metric2,
	})
	require.NoError(t, err, "Should not return error")

	// remaining valid metrics should still get written
	svc.AssertRequests(t, []*kinesis.PutRecordsInput{
		{
			StreamName: aws.String(testStreamName),
			Records: []types.PutRecordsRequestEntry{
				{
					PartitionKey: aws.String(testPartitionKey),
					Data:         metric1Data,
				},
				{
					PartitionKey: aws.String(testPartitionKey),
					Data:         metric2Data,
				},
			},
		},
	})
}

type mockKinesisPutRecordsResponse struct {
	Output *kinesis.PutRecordsOutput
	Err    error
}

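// mockKinesisPutRecords is a test double for the kinesisClient interface; it
// records every PutRecords request and replays the queued responses in order.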
type mockKinesisPutRecords struct {
	requests  []*kinesis.PutRecordsInput
	responses []*mockKinesisPutRecordsResponse
}

func (m *mockKinesisPutRecords) SetupResponse(
	failedRecordCount int32,
	records []types.PutRecordsResultEntry,
) {
	m.responses = append(m.responses, &mockKinesisPutRecordsResponse{
		Err: nil,
		Output: &kinesis.PutRecordsOutput{
			FailedRecordCount: aws.Int32(failedRecordCount),
			Records:           records,
		},
	})
}

func (m *mockKinesisPutRecords) SetupGenericResponse(successfulRecordCount uint32, failedRecordCount int32) {
	records := make([]types.PutRecordsResultEntry, 0, int32(successfulRecordCount)+failedRecordCount)
	for i := uint32(0); i < successfulRecordCount; i++ {
		records = append(records, types.PutRecordsResultEntry{
			SequenceNumber: aws.String(testSequenceNumber),
			ShardId:        aws.String(testShardID),
		})
	}

	for i := int32(0); i < failedRecordCount; i++ {
		records = append(records, types.PutRecordsResultEntry{
			ErrorCode:    aws.String("InternalFailure"),
			ErrorMessage: aws.String("Internal Service Failure"),
		})
	}

	m.SetupResponse(failedRecordCount, records)
}

func (m *mockKinesisPutRecords) SetupErrorResponse(err error) {
	m.responses = append(m.responses, &mockKinesisPutRecordsResponse{
		Err:    err,
		Output: nil,
	})
}

func (m *mockKinesisPutRecords) PutRecords(_ context.Context, input *kinesis.PutRecordsInput, _ ...func(*kinesis.Options)) (*kinesis.PutRecordsOutput, error) {
	reqNum := len(m.requests)
	if reqNum > len(m.responses) {
		return nil, fmt.Errorf("response for request %+v not setup", reqNum)
	}

	m.requests = append(m.requests, input)

	resp := m.responses[reqNum]
	return resp.Output, resp.Err
}

func (m *mockKinesisPutRecords) AssertRequests(
	t *testing.T,
	expected []*kinesis.PutRecordsInput,
) {
	require.Lenf(t, m.requests, len(expected), "Expected %v requests", len(expected))

	for i, expectedInput := range expected {
		actualInput := m.requests[i]

		require.Equalf(t,
			expectedInput.StreamName,
			actualInput.StreamName,
			"Expected request %v to have correct StreamName", i,
		)

		require.Lenf(t, actualInput.Records, len(expectedInput.Records), "Expected request %v to have %v Records", i, len(expectedInput.Records))

		for r, expectedRecord := range expectedInput.Records {
			actualRecord := actualInput.Records[r]

			require.Equalf(t,
				expectedRecord.PartitionKey,
				actualRecord.PartitionKey,
				"Expected (request %v, record %v) to have correct PartitionKey", i, r,
			)

			require.Equalf(t,
				expectedRecord.ExplicitHashKey,
				actualRecord.ExplicitHashKey,
				"Expected (request %v, record %v) to have correct ExplicitHashKey", i, r,
			)

			require.Equalf(t,
				expectedRecord.Data,
				actualRecord.Data,
				"Expected (request %v, record %v) to have correct Data", i, r,
			)
		}
	}
}

func createTestMetric(t *testing.T, name string, serializer telegraf.Serializer) (telegraf.Metric, []byte) {
	metric := testutil.TestMetric(1, name)

	data, err := serializer.Serialize(metric)
	require.NoError(t, err)

	return metric, data
}

func createTestMetrics(t *testing.T, count uint32, serializer telegraf.Serializer) ([]telegraf.Metric, [][]byte) {
	metrics := make([]telegraf.Metric, 0, count)
	metricsData := make([][]byte, 0, count)

	for i := uint32(0); i < count; i++ {
		name := fmt.Sprintf("metric%d", i)
		metric, data := createTestMetric(t, name, serializer)
		metrics = append(metrics, metric)
		metricsData = append(metricsData, data)
	}

	return metrics, metricsData
}

func createPutRecordsRequestEntries(
	metricsData [][]byte,
) []types.PutRecordsRequestEntry {
	records := make([]types.PutRecordsRequestEntry, 0, len(metricsData))

	for _, data := range metricsData {
		records = append(records, types.PutRecordsRequestEntry{
			PartitionKey: aws.String(testPartitionKey),
			Data:         data,
		})
	}

	return records
}
plugins/outputs/kinesis/sample.conf (new file, 66 lines)

@@ -0,0 +1,66 @@
# Configuration for the AWS Kinesis output.
[[outputs.kinesis]]
  ## Amazon REGION of kinesis endpoint.
  region = "ap-southeast-2"

  ## Amazon Credentials
  ## Credentials are loaded in the following order
  ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
  ## 2) Assumed credentials via STS if role_arn is specified
  ## 3) Explicit credentials from 'access_key' and 'secret_key'
  ## 4) Shared profile from 'profile'
  ## 5) Environment variables
  ## 6) Shared credentials file
  ## 7) EC2 Instance Profile
  # access_key = ""
  # secret_key = ""
  # token = ""
  # role_arn = ""
  # web_identity_token_file = ""
  # role_session_name = ""
  # profile = ""
  # shared_credential_file = ""

  ## Endpoint to make request against, the correct endpoint is automatically
  ## determined and this option should only be set if you wish to override the
  ## default.
  ## ex: endpoint_url = "http://localhost:8000"
  # endpoint_url = ""

  ## Kinesis StreamName must exist prior to starting telegraf.
  streamname = "StreamName"

  ## Data format to output.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "influx"

  ## debug will show upstream aws messages.
  debug = false

  ## NOTE: Due to the way TOML is parsed, tables must be at the END of the
  ## plugin definition, otherwise additional config options are read as part
  ## of the table

  ## The partition key can be calculated using one of several methods:
  ##
  ## Use a static value for all writes:
  # [outputs.kinesis.partition]
  #   method = "static"
  #   key = "howdy"
  #
  ## Use a random partition key on each write:
  # [outputs.kinesis.partition]
  #   method = "random"
  #
  ## Use the measurement name as the partition key:
  # [outputs.kinesis.partition]
  #   method = "measurement"
  #
  ## Use the value of a tag for all writes; if the tag is not set, the value
  ## of the 'default' option will be used. If no default is set, it falls
  ## back to "telegraf".
  # [outputs.kinesis.partition]
  #   method = "tag"
  #   key = "host"
  #   default = "mykey"