Adding upstream version 1.34.4.
Signed-off-by: Daniel Baumann <daniel@debian.org>
parent e393c3af3f, commit 4978089aab

4963 changed files with 677545 additions and 0 deletions

plugins/inputs/cloudwatch/README.md (new file)
@@ -0,0 +1,344 @@
# Amazon CloudWatch Statistics Input Plugin

This plugin will gather metric statistics from [Amazon CloudWatch][cloudwatch].

⭐ Telegraf v0.12.1
🏷️ cloud
💻 all

[cloudwatch]: https://aws.amazon.com/cloudwatch

## Amazon Authentication

This plugin uses a credential chain for authentication with the CloudWatch
API endpoint. The plugin attempts to authenticate in the following order
(a configuration sketch follows the list):

1. Assumed credentials via STS if the `role_arn` attribute is specified
   (source credentials are evaluated from subsequent rules)
2. Explicit credentials from the `access_key`, `secret_key`, and `token` attributes
3. Shared profile from the `profile` attribute
4. [Environment Variables][env]
5. [Shared Credentials][credentials]
6. [EC2 Instance Profile][iam-roles]
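
For example, to have Telegraf assume an IAM role via STS while the source
credentials come from the rest of the chain, only `role_arn` (and optionally
`role_session_name`) needs to be set. This is a minimal sketch; the ARN and
session name below are placeholders, not values from this repository:

```toml
[[inputs.cloudwatch]]
  region = "us-east-1"
  period = "5m"
  delay  = "5m"

  ## Hypothetical role to assume; replace with your own ARN.
  role_arn          = "arn:aws:iam::123456789012:role/telegraf-cloudwatch"
  role_session_name = "telegraf"
```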

## Global configuration options <!-- @/docs/includes/plugin_config.md -->

In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used to
modify metrics, tags, and fields or create aliases and configure ordering, etc.
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.

[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins

## Configuration

```toml @sample.conf
# Pull Metric Statistics from Amazon CloudWatch
[[inputs.cloudwatch]]
  ## Amazon Region
  region = "us-east-1"

  ## Amazon Credentials
  ## Credentials are loaded in the following order
  ## 1) Web identity provider credentials via STS if role_arn and
  ##    web_identity_token_file are specified
  ## 2) Assumed credentials via STS if role_arn is specified
  ## 3) explicit credentials from 'access_key' and 'secret_key'
  ## 4) shared profile from 'profile'
  ## 5) environment variables
  ## 6) shared credentials file
  ## 7) EC2 Instance Profile
  # access_key = ""
  # secret_key = ""
  # token = ""
  # role_arn = ""
  # web_identity_token_file = ""
  # role_session_name = ""
  # profile = ""
  # shared_credential_file = ""

  ## If you are using CloudWatch cross-account observability, you can
  ## set IncludeLinkedAccounts to true in a monitoring account
  ## and collect metrics from the linked source accounts
  # include_linked_accounts = false

  ## Endpoint to make request against, the correct endpoint is automatically
  ## determined and this option should only be set if you wish to override the
  ## default.
  ## ex: endpoint_url = "http://localhost:8000"
  # endpoint_url = ""

  ## Set http_proxy
  # use_system_proxy = false
  # http_proxy_url = "http://localhost:8888"

  ## The minimum period for Cloudwatch metrics is 1 minute (60s). However not
  ## all metrics are made available to the 1 minute period. Some are collected
  ## at 3 minute, 5 minute, or larger intervals.
  ## See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
  ## Note that if a period is configured that is smaller than the minimum for a
  ## particular metric, that metric will not be returned by the Cloudwatch API
  ## and will not be collected by Telegraf.
  #
  ## Requested CloudWatch aggregation Period (required)
  ## Must be a multiple of 60s.
  period = "5m"

  ## Collection Delay (required)
  ## Must account for metrics availability via CloudWatch API
  delay = "5m"

  ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
  ## gaps or overlap in pulled data
  interval = "5m"

  ## Recommended if "delay" and "period" are both within 3 hours of request
  ## time. Invalid values will be ignored. Recently Active feature will only
  ## poll for CloudWatch ListMetrics values that occurred within the last 3h.
  ## If enabled, it will reduce total API usage of the CloudWatch ListMetrics
  ## API and require less memory to retain.
  ## Do not enable if "period" or "delay" is longer than 3 hours, as it will
  ## not return data more than 3 hours old.
  ## See https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_ListMetrics.html
  # recently_active = "PT3H"

  ## Configure the TTL for the internal cache of metrics.
  # cache_ttl = "1h"

  ## Metric Statistic Namespaces, wildcards are allowed
  # namespaces = ["*"]

  ## Metric Format
  ## This determines the format of the produced metrics. 'sparse', the default,
  ## will produce a unique field for each statistic. 'dense' will report all
  ## statistics in a field called value and have a metric_name tag
  ## defining the name of the statistic. See the plugin README for examples.
  # metric_format = "sparse"

  ## Maximum requests per second. Note that the global default AWS rate limit
  ## is 50 reqs/sec, so if you define multiple namespaces, these should add up
  ## to a maximum of 50.
  ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html
  # ratelimit = 25

  ## Timeout for http requests made by the cloudwatch client.
  # timeout = "5s"

  ## Batch Size
  ## The size of each batch to send requests to Cloudwatch. 500 is the
  ## suggested largest size. If a request gets too large (413 errors), consider
  ## reducing this amount.
  # batch_size = 500

  ## Namespace-wide statistic filters. These allow fewer queries to be made to
  ## cloudwatch.
  # statistic_include = ["average", "sum", "minimum", "maximum", "sample_count"]
  # statistic_exclude = []

  ## Metrics to Pull
  ## Defaults to all Metrics in Namespace if nothing is provided
  ## Refreshes Namespace available metrics every 1h
  #[[inputs.cloudwatch.metrics]]
  # names = ["Latency", "RequestCount"]
  #
  # ## Statistic filters for Metric. These allow for retrieving specific
  # ## statistics for an individual metric.
  # # statistic_include = ["average", "sum", "minimum", "maximum", "sample_count"]
  # # statistic_exclude = []
  #
  # ## Dimension filters for Metric.
  # ## All dimensions defined for the metric names must be specified in order
  # ## to retrieve the metric statistics.
  # ## 'value' has wildcard / 'glob' matching support such as 'p-*'.
  # [[inputs.cloudwatch.metrics.dimensions]]
  #   name = "LoadBalancerName"
  #   value = "p-example"
```

Please note, the `namespace` option is deprecated in favor of the `namespaces`
list option.
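
For example, a configuration that previously used the deprecated scalar option
can switch to the list form; the `AWS/ELB` namespace below is used purely as an
illustration:

```toml
[[inputs.cloudwatch]]
  region = "us-east-1"

  ## Deprecated form:
  # namespace = "AWS/ELB"

  ## Preferred form, which also accepts several namespaces and wildcards:
  namespaces = ["AWS/ELB"]
```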

## Requirements and Terminology

Plugin configuration utilizes [CloudWatch concepts][concept] and access
patterns to allow monitoring of any CloudWatch metric.

- `region` must be a valid AWS [region][] value
- `period` must be a valid CloudWatch [period][] value
- `namespaces` must be a list of valid CloudWatch [namespace][] value(s)
- `names` must be valid CloudWatch [metric][] names
- `dimensions` must be valid CloudWatch [dimension][] name/value pairs

Omitting a dimension value, or specifying a value of `'*'`, configures all
available metrics that contain a dimension with the specified name to be
retrieved. If specifying more than one dimension, then the metric must contain
*all* the configured dimensions, where the value of the wildcard dimension is
ignored.

Example:

```toml
[[inputs.cloudwatch]]
  period = "1m"
  interval = "5m"

  [[inputs.cloudwatch.metrics]]
    names = ["Latency"]

    ## Dimension filters for Metric (optional)
    [[inputs.cloudwatch.metrics.dimensions]]
      name = "LoadBalancerName"
      value = "p-example"

    [[inputs.cloudwatch.metrics.dimensions]]
      name = "AvailabilityZone"
      value = "*"
```

If the following ELBs are available:

- name: `p-example`, availabilityZone: `us-east-1a`
- name: `p-example`, availabilityZone: `us-east-1b`
- name: `q-example`, availabilityZone: `us-east-1a`
- name: `q-example`, availabilityZone: `us-east-1b`

Then 2 metrics will be output:

- name: `p-example`, availabilityZone: `us-east-1a`
- name: `p-example`, availabilityZone: `us-east-1b`

If the `AvailabilityZone` wildcard dimension were omitted, then a single metric
(name: `p-example`) would be exported containing the aggregate values of the ELB
across availability zones.

To maximize efficiency and savings, consider making fewer requests by increasing
`interval` while keeping `period` at the duration you would like metrics to be
reported. The above example will request metrics from CloudWatch every 5 minutes
but will output five metrics timestamped one minute apart.

## Restrictions and Limitations

- CloudWatch metrics are not available instantly via the CloudWatch API.
  You should adjust your collection `delay` to account for this lag in metrics
  availability based on your [monitoring subscription level][using].
- CloudWatch API usage incurs cost; see [GetMetricData Pricing][pricing].

## Metrics

Each monitored CloudWatch namespace records a measurement with fields for each
available metric statistic. Namespace and metric names are represented in
[snake case](https://en.wikipedia.org/wiki/Snake_case); for example, the
`AWS/ELB` namespace becomes the `cloudwatch_aws_elb` measurement.

### Sparse Metrics

By default, metrics generated by this plugin are sparse. Use the `metric_format`
option to override this setting.

Sparse metrics produce a set of fields for every AWS Metric.

- cloudwatch_{namespace}
  - Fields
    - {metric}_sum (metric Sum value)
    - {metric}_average (metric Average value)
    - {metric}_minimum (metric Minimum value)
    - {metric}_maximum (metric Maximum value)
    - {metric}_sample_count (metric SampleCount value)

For example:

```text
cloudwatch_aws_usage,class=None,resource=GetSecretValue,service=Secrets\ Manager,type=API call_count_maximum=1,call_count_minimum=1,call_count_sum=8,call_count_sample_count=8,call_count_average=1 1715097720000000000
```

### Dense Metrics

Dense metrics are generated when `metric_format` is set to `dense`.

Dense metrics use the same fields over and over for every AWS Metric and
differentiate between AWS Metrics using a tag called `metric_name` with the AWS
Metric name:

- cloudwatch_{namespace}
  - Tags
    - metric_name (AWS Metric name)
  - Fields
    - sum (metric Sum value)
    - average (metric Average value)
    - minimum (metric Minimum value)
    - maximum (metric Maximum value)
    - sample_count (metric SampleCount value)

For example:

```text
cloudwatch_aws_usage,class=None,resource=GetSecretValue,service=Secrets\ Manager,metric_name=call_count,type=API sum=6,sample_count=6,average=1,maximum=1,minimum=1 1715097840000000000
```

### Tags

Each measurement is tagged with the following identifiers to uniquely identify
the associated metric. Tag and dimension names are represented in
[snake case](https://en.wikipedia.org/wiki/Snake_case).

- All measurements have the following tags:
  - region (CloudWatch Region)
  - {dimension-name} (CloudWatch Dimension value - one per metric dimension)
- If `include_linked_accounts` is set to true, the following tag is also
  provided (see the example after this list):
  - account (the ID of the account where the metrics are located)
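
For instance, with `include_linked_accounts = true` the ELB series from the
example output below would also carry an `account` tag; the account ID and
field value here are placeholders, not real data:

```text
cloudwatch_aws_elb,account=123456789012,load_balancer_name=p-example,region=us-east-1 latency_average=0.0048 1459542420000000000
```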

## Troubleshooting

You can use the AWS CLI to get a list of available metrics and dimensions:

```shell
aws cloudwatch list-metrics --namespace AWS/EC2 --region us-east-1
aws cloudwatch list-metrics --namespace AWS/EC2 --region us-east-1 --metric-name CPUCreditBalance
```

If the expected metrics are not returned, you can try getting them manually
for a short period of time:

```shell
aws cloudwatch get-metric-data \
    --start-time 2018-07-01T00:00:00Z \
    --end-time 2018-07-01T00:15:00Z \
    --metric-data-queries '[
      {
        "Id": "avgCPUCreditBalance",
        "MetricStat": {
          "Metric": {
            "Namespace": "AWS/EC2",
            "MetricName": "CPUCreditBalance",
            "Dimensions": [
              {
                "Name": "InstanceId",
                "Value": "i-deadbeef"
              }
            ]
          },
          "Period": 300,
          "Stat": "Average"
        },
        "Label": "avgCPUCreditBalance"
      }
    ]'
```
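
To check what Telegraf itself would collect with your configuration, you can
also run the plugin once in test mode. This assumes your configuration file is
named `telegraf.conf`; adjust the path as needed:

```shell
telegraf --config telegraf.conf --input-filter cloudwatch --test
```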

## Example Output

See the discussion above about sparse vs dense metrics for more details.

```text
cloudwatch_aws_elb,load_balancer_name=p-example,region=us-east-1 latency_average=0.004810798017284538,latency_maximum=0.1100282669067383,latency_minimum=0.0006084442138671875,latency_sample_count=4029,latency_sum=19.382705211639404 1459542420000000000
```

[concept]: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html
[credentials]: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#shared-credentials-file
[dimension]: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#Dimension
[env]: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#environment-variables
[iam-roles]: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
[metric]: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#Metric
[namespace]: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#Namespace
[period]: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#CloudWatchPeriods
[pricing]: https://aws.amazon.com/cloudwatch/pricing/
[region]: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#CloudWatchRegions
[using]: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch-new.html

plugins/inputs/cloudwatch/cloudwatch.go (new file)
@@ -0,0 +1,490 @@
//go:generate ../../../tools/readme_config_includer/generator
|
||||
package cloudwatch
|
||||
|
||||
import (
|
||||
"context"
|
||||
_ "embed"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/aws"
|
||||
"github.com/aws/aws-sdk-go-v2/service/cloudwatch"
|
||||
"github.com/aws/aws-sdk-go-v2/service/cloudwatch/types"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/config"
|
||||
"github.com/influxdata/telegraf/filter"
|
||||
"github.com/influxdata/telegraf/internal/limiter"
|
||||
"github.com/influxdata/telegraf/metric"
|
||||
common_aws "github.com/influxdata/telegraf/plugins/common/aws"
|
||||
"github.com/influxdata/telegraf/plugins/common/proxy"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
//go:embed sample.conf
|
||||
var sampleConfig string
|
||||
|
||||
type CloudWatch struct {
|
||||
StatisticExclude []string `toml:"statistic_exclude"`
|
||||
StatisticInclude []string `toml:"statistic_include"`
|
||||
Timeout config.Duration `toml:"timeout"`
|
||||
|
||||
proxy.HTTPProxy
|
||||
|
||||
Period config.Duration `toml:"period"`
|
||||
Delay config.Duration `toml:"delay"`
|
||||
Namespace string `toml:"namespace" deprecated:"1.25.0;1.35.0;use 'namespaces' instead"`
|
||||
Namespaces []string `toml:"namespaces"`
|
||||
Metrics []*cloudwatchMetric `toml:"metrics"`
|
||||
CacheTTL config.Duration `toml:"cache_ttl"`
|
||||
RateLimit int `toml:"ratelimit"`
|
||||
RecentlyActive string `toml:"recently_active"`
|
||||
BatchSize int `toml:"batch_size"`
|
||||
IncludeLinkedAccounts bool `toml:"include_linked_accounts"`
|
||||
MetricFormat string `toml:"metric_format"`
|
||||
Log telegraf.Logger `toml:"-"`
|
||||
common_aws.CredentialConfig
|
||||
|
||||
client cloudwatchClient
|
||||
nsFilter filter.Filter
|
||||
statFilter filter.Filter
|
||||
cache *metricCache
|
||||
queryDimensions map[string]*map[string]string
|
||||
windowStart time.Time
|
||||
windowEnd time.Time
|
||||
}
|
||||
|
||||
type cloudwatchMetric struct {
|
||||
MetricNames []string `toml:"names"`
|
||||
Dimensions []*dimension `toml:"dimensions"`
|
||||
StatisticExclude *[]string `toml:"statistic_exclude"`
|
||||
StatisticInclude *[]string `toml:"statistic_include"`
|
||||
}
|
||||
|
||||
type dimension struct {
|
||||
Name string `toml:"name"`
|
||||
Value string `toml:"value"`
|
||||
valueMatcher filter.Filter
|
||||
}
|
||||
|
||||
type metricCache struct {
|
||||
ttl time.Duration
|
||||
built time.Time
|
||||
metrics []filteredMetric
|
||||
queries map[string][]types.MetricDataQuery
|
||||
}
|
||||
|
||||
type filteredMetric struct {
|
||||
metrics []types.Metric
|
||||
accounts []string
|
||||
statFilter filter.Filter
|
||||
}
|
||||
|
||||
type cloudwatchClient interface {
|
||||
ListMetrics(context.Context, *cloudwatch.ListMetricsInput, ...func(*cloudwatch.Options)) (*cloudwatch.ListMetricsOutput, error)
|
||||
GetMetricData(context.Context, *cloudwatch.GetMetricDataInput, ...func(*cloudwatch.Options)) (*cloudwatch.GetMetricDataOutput, error)
|
||||
}
|
||||
|
||||
func (*CloudWatch) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (c *CloudWatch) Init() error {
|
||||
// For backward compatibility
|
||||
if len(c.Namespace) != 0 {
|
||||
c.Namespaces = append(c.Namespaces, c.Namespace)
|
||||
}
|
||||
|
||||
// Check user settings
|
||||
switch c.MetricFormat {
|
||||
case "":
|
||||
c.MetricFormat = "sparse"
|
||||
case "dense", "sparse":
|
||||
default:
|
||||
return fmt.Errorf("invalid metric_format: %s", c.MetricFormat)
|
||||
}
|
||||
|
||||
// Setup the cloudwatch client
|
||||
proxyFunc, err := c.HTTPProxy.Proxy()
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating proxy failed: %w", err)
|
||||
}
|
||||
|
||||
creds, err := c.CredentialConfig.Credentials()
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting credentials failed: %w", err)
|
||||
}
|
||||
|
||||
c.client = cloudwatch.NewFromConfig(creds, func(options *cloudwatch.Options) {
|
||||
if c.CredentialConfig.EndpointURL != "" && c.CredentialConfig.Region != "" {
|
||||
options.BaseEndpoint = &c.CredentialConfig.EndpointURL
|
||||
}
|
||||
|
||||
options.ClientLogMode = 0
|
||||
options.HTTPClient = &http.Client{
|
||||
// use values from DefaultTransport
|
||||
Transport: &http.Transport{
|
||||
Proxy: proxyFunc,
|
||||
DialContext: (&net.Dialer{
|
||||
Timeout: 30 * time.Second,
|
||||
KeepAlive: 30 * time.Second,
|
||||
DualStack: true,
|
||||
}).DialContext,
|
||||
MaxIdleConns: 100,
|
||||
IdleConnTimeout: 90 * time.Second,
|
||||
TLSHandshakeTimeout: 10 * time.Second,
|
||||
ExpectContinueTimeout: 1 * time.Second,
|
||||
},
|
||||
Timeout: time.Duration(c.Timeout),
|
||||
}
|
||||
})
|
||||
|
||||
for _, m := range c.Metrics {
|
||||
// Sort the metrics for efficient comparison later
|
||||
slices.SortStableFunc(m.Dimensions, func(a, b *dimension) int {
|
||||
return strings.Compare(a.Name, b.Name)
|
||||
})
|
||||
// Initialize filter for metric dimensions to include
|
||||
for _, dimension := range m.Dimensions {
|
||||
matcher, err := filter.NewIncludeExcludeFilter([]string{dimension.Value}, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating dimension filter for dimension %q failed: %w", dimension, err)
|
||||
}
|
||||
dimension.valueMatcher = matcher
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize statistics-type filter
|
||||
c.statFilter, err = filter.NewIncludeExcludeFilter(c.StatisticInclude, c.StatisticExclude)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating statistics filter failed: %w", err)
|
||||
}
|
||||
|
||||
// Initialize namespace filter
|
||||
c.nsFilter, err = filter.Compile(c.Namespaces)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating namespace filter failed: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {
|
||||
filteredMetrics, err := c.getFilteredMetrics()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.updateWindow(time.Now())
|
||||
|
||||
// Get all of the possible queries so we can send groups of 100.
|
||||
queries := c.getDataQueries(filteredMetrics)
|
||||
if len(queries) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Limit concurrency or we can easily exhaust user connection limit.
|
||||
// See cloudwatch API request limits:
|
||||
// http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_limits.html
|
||||
lmtr := limiter.NewRateLimiter(c.RateLimit, time.Second)
|
||||
defer lmtr.Stop()
|
||||
wg := sync.WaitGroup{}
|
||||
rLock := sync.Mutex{}
|
||||
|
||||
results := make(map[string][]types.MetricDataResult)
|
||||
for namespace, namespacedQueries := range queries {
|
||||
var batches [][]types.MetricDataQuery
|
||||
|
||||
for c.BatchSize < len(namespacedQueries) {
|
||||
namespacedQueries, batches = namespacedQueries[c.BatchSize:], append(batches, namespacedQueries[0:c.BatchSize:c.BatchSize])
|
||||
}
|
||||
batches = append(batches, namespacedQueries)
|
||||
|
||||
for i := range batches {
|
||||
wg.Add(1)
|
||||
<-lmtr.C
|
||||
go func(n string, inm []types.MetricDataQuery) {
|
||||
defer wg.Done()
|
||||
result, err := c.gatherMetrics(inm)
|
||||
if err != nil {
|
||||
acc.AddError(err)
|
||||
return
|
||||
}
|
||||
|
||||
rLock.Lock()
|
||||
results[n] = append(results[n], result...)
|
||||
rLock.Unlock()
|
||||
}(namespace, batches[i])
|
||||
}
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
c.aggregateMetrics(acc, results)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *CloudWatch) getFilteredMetrics() ([]filteredMetric, error) {
|
||||
if c.cache != nil && c.cache.metrics != nil && time.Since(c.cache.built) < c.cache.ttl {
|
||||
return c.cache.metrics, nil
|
||||
}
|
||||
|
||||
// Get all metrics from cloudwatch for filtering
|
||||
params := &cloudwatch.ListMetricsInput{
|
||||
IncludeLinkedAccounts: &c.IncludeLinkedAccounts,
|
||||
}
|
||||
if c.RecentlyActive == "PT3H" {
|
||||
params.RecentlyActive = types.RecentlyActivePt3h
|
||||
}
|
||||
|
||||
// Return the subset of metrics matching the namespace and at one of the
|
||||
// metric definitions if any
|
||||
var metrics []types.Metric
|
||||
var accounts []string
|
||||
for {
|
||||
resp, err := c.client.ListMetrics(context.Background(), params)
|
||||
if err != nil {
|
||||
c.Log.Errorf("failed to list metrics: %v", err)
|
||||
break
|
||||
}
|
||||
c.Log.Tracef("got %d metrics with %d accounts", len(resp.Metrics), len(resp.OwningAccounts))
|
||||
for i, m := range resp.Metrics {
|
||||
if c.Log.Level().Includes(telegraf.Trace) {
|
||||
dims := make([]string, 0, len(m.Dimensions))
|
||||
for _, d := range m.Dimensions {
|
||||
dims = append(dims, *d.Name+"="+*d.Value)
|
||||
}
|
||||
a := "none"
|
||||
if len(resp.OwningAccounts) > 0 {
|
||||
a = resp.OwningAccounts[i]
|
||||
}
|
||||
c.Log.Tracef(" metric %3d: %s (%s): %s [%s]\n", i, *m.MetricName, *m.Namespace, strings.Join(dims, ", "), a)
|
||||
}
|
||||
|
||||
if c.nsFilter != nil && !c.nsFilter.Match(*m.Namespace) {
|
||||
c.Log.Trace(" -> rejected by namespace")
|
||||
continue
|
||||
}
|
||||
|
||||
if len(c.Metrics) > 0 && !slices.ContainsFunc(c.Metrics, func(cm *cloudwatchMetric) bool {
|
||||
return metricMatch(cm, m)
|
||||
}) {
|
||||
c.Log.Trace(" -> rejected by metric mismatch")
|
||||
continue
|
||||
}
|
||||
c.Log.Trace(" -> keeping metric")
|
||||
|
||||
metrics = append(metrics, m)
|
||||
if len(resp.OwningAccounts) > 0 {
|
||||
accounts = append(accounts, resp.OwningAccounts[i])
|
||||
}
|
||||
}
|
||||
|
||||
if resp.NextToken == nil {
|
||||
break
|
||||
}
|
||||
params.NextToken = resp.NextToken
|
||||
}
|
||||
|
||||
var filtered []filteredMetric
|
||||
if len(c.Metrics) == 0 {
|
||||
filtered = append(filtered, filteredMetric{
|
||||
metrics: metrics,
|
||||
accounts: accounts,
|
||||
statFilter: c.statFilter,
|
||||
})
|
||||
} else {
|
||||
for idx, cm := range c.Metrics {
|
||||
var entry filteredMetric
|
||||
if cm.StatisticInclude == nil && cm.StatisticExclude == nil {
|
||||
entry.statFilter = c.statFilter
|
||||
} else {
|
||||
var includeStats, excludeStats []string
|
||||
if cm.StatisticInclude != nil {
|
||||
includeStats = *cm.StatisticInclude
|
||||
}
|
||||
if cm.StatisticExclude != nil {
|
||||
excludeStats = *cm.StatisticExclude
|
||||
}
|
||||
f, err := filter.NewIncludeExcludeFilter(includeStats, excludeStats)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("creating statistics filter for metric %d failed: %w", idx+1, err)
|
||||
}
|
||||
entry.statFilter = f
|
||||
}
|
||||
|
||||
for i, m := range metrics {
|
||||
if metricMatch(cm, m) {
|
||||
entry.metrics = append(entry.metrics, m)
|
||||
if len(accounts) > 0 {
|
||||
entry.accounts = append(entry.accounts, accounts[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
filtered = append(filtered, entry)
|
||||
}
|
||||
}
|
||||
|
||||
c.cache = &metricCache{
|
||||
metrics: filtered,
|
||||
built: time.Now(),
|
||||
ttl: time.Duration(c.CacheTTL),
|
||||
}
|
||||
|
||||
return filtered, nil
|
||||
}
|
||||
|
||||
func (c *CloudWatch) updateWindow(relativeTo time.Time) {
|
||||
windowEnd := relativeTo.Add(-time.Duration(c.Delay))
|
||||
|
||||
if c.windowEnd.IsZero() {
|
||||
// this is the first run, no window info, so just get a single period
|
||||
c.windowStart = windowEnd.Add(-time.Duration(c.Period))
|
||||
} else {
|
||||
// subsequent window, start where last window left off
|
||||
c.windowStart = c.windowEnd
|
||||
}
|
||||
|
||||
c.windowEnd = windowEnd
|
||||
}
|
||||
|
||||
// getDataQueries gets all of the possible queries so we can maximize the request payload.
|
||||
func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) map[string][]types.MetricDataQuery {
|
||||
if c.cache != nil && c.cache.queries != nil && c.cache.metrics != nil && time.Since(c.cache.built) < c.cache.ttl {
|
||||
return c.cache.queries
|
||||
}
|
||||
|
||||
c.queryDimensions = make(map[string]*map[string]string)
|
||||
dataQueries := make(map[string][]types.MetricDataQuery)
|
||||
for i, filtered := range filteredMetrics {
|
||||
for j, singleMetric := range filtered.metrics {
|
||||
id := strconv.Itoa(j) + "_" + strconv.Itoa(i)
|
||||
dimension := ctod(singleMetric.Dimensions)
|
||||
var accountID *string
|
||||
if c.IncludeLinkedAccounts && len(filtered.accounts) > j {
|
||||
accountID = aws.String(filtered.accounts[j])
|
||||
(*dimension)["account"] = filtered.accounts[j]
|
||||
}
|
||||
|
||||
statisticTypes := map[string]string{
|
||||
"average": "Average",
|
||||
"maximum": "Maximum",
|
||||
"minimum": "Minimum",
|
||||
"sum": "Sum",
|
||||
"sample_count": "SampleCount",
|
||||
}
|
||||
|
||||
for statisticType, statistic := range statisticTypes {
|
||||
if !filtered.statFilter.Match(statisticType) {
|
||||
continue
|
||||
}
|
||||
queryID := statisticType + "_" + id
|
||||
c.queryDimensions[queryID] = dimension
|
||||
dataQueries[*singleMetric.Namespace] = append(dataQueries[*singleMetric.Namespace], types.MetricDataQuery{
|
||||
Id: aws.String(queryID),
|
||||
AccountId: accountID,
|
||||
Label: aws.String(snakeCase(*singleMetric.MetricName + "_" + statisticType)),
|
||||
MetricStat: &types.MetricStat{
|
||||
Metric: &filtered.metrics[j],
|
||||
Period: aws.Int32(int32(time.Duration(c.Period).Seconds())),
|
||||
Stat: aws.String(statistic),
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(dataQueries) == 0 {
|
||||
c.Log.Debug("no metrics found to collect")
|
||||
return nil
|
||||
}
|
||||
|
||||
if c.cache == nil {
|
||||
c.cache = &metricCache{
|
||||
queries: dataQueries,
|
||||
built: time.Now(),
|
||||
ttl: time.Duration(c.CacheTTL),
|
||||
}
|
||||
} else {
|
||||
c.cache.queries = dataQueries
|
||||
}
|
||||
|
||||
return dataQueries
|
||||
}
|
||||
|
||||
func (c *CloudWatch) gatherMetrics(queries []types.MetricDataQuery) ([]types.MetricDataResult, error) {
|
||||
params := &cloudwatch.GetMetricDataInput{
|
||||
StartTime: aws.Time(c.windowStart),
|
||||
EndTime: aws.Time(c.windowEnd),
|
||||
MetricDataQueries: queries,
|
||||
}
|
||||
|
||||
results := make([]types.MetricDataResult, 0)
|
||||
for {
|
||||
resp, err := c.client.GetMetricData(context.Background(), params)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get metric data: %w", err)
|
||||
}
|
||||
|
||||
results = append(results, resp.MetricDataResults...)
|
||||
if resp.NextToken == nil {
|
||||
break
|
||||
}
|
||||
params.NextToken = resp.NextToken
|
||||
}
|
||||
|
||||
return results, nil
|
||||
}
|
||||
|
||||
func (c *CloudWatch) aggregateMetrics(acc telegraf.Accumulator, metricDataResults map[string][]types.MetricDataResult) {
|
||||
grouper := metric.NewSeriesGrouper()
|
||||
for namespace, results := range metricDataResults {
|
||||
namespace = sanitizeMeasurement(namespace)
|
||||
|
||||
for _, result := range results {
|
||||
tags := make(map[string]string)
|
||||
if dimensions, ok := c.queryDimensions[*result.Id]; ok {
|
||||
tags = *dimensions
|
||||
}
|
||||
tags["region"] = c.Region
|
||||
|
||||
for i := range result.Values {
|
||||
if c.MetricFormat == "dense" {
|
||||
// Remove the IDs from the result ID to get the statistic type
|
||||
// e.g. "average" from "average_0_0"
|
||||
re := regexp.MustCompile(`_\d+_\d+$`)
|
||||
statisticType := re.ReplaceAllString(*result.Id, "")
|
||||
|
||||
// Remove the statistic type from the label to get the AWS Metric name
|
||||
// e.g. "CPUUtilization" from "CPUUtilization_average"
|
||||
re = regexp.MustCompile(`_?` + regexp.QuoteMeta(statisticType) + `$`)
|
||||
tags["metric_name"] = re.ReplaceAllString(*result.Label, "")
|
||||
|
||||
grouper.Add(namespace, tags, result.Timestamps[i], statisticType, result.Values[i])
|
||||
} else {
|
||||
grouper.Add(namespace, tags, result.Timestamps[i], *result.Label, result.Values[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, singleMetric := range grouper.Metrics() {
|
||||
acc.AddMetric(singleMetric)
|
||||
}
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("cloudwatch", func() telegraf.Input {
|
||||
return &CloudWatch{
|
||||
CacheTTL: config.Duration(time.Hour),
|
||||
RateLimit: 25,
|
||||
Timeout: config.Duration(time.Second * 5),
|
||||
BatchSize: 500,
|
||||
}
|
||||
})
|
||||
}

plugins/inputs/cloudwatch/cloudwatch_test.go (new file)
@@ -0,0 +1,700 @@
package cloudwatch
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/aws"
|
||||
"github.com/aws/aws-sdk-go-v2/service/cloudwatch"
|
||||
"github.com/aws/aws-sdk-go-v2/service/cloudwatch/types"
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/config"
|
||||
"github.com/influxdata/telegraf/filter"
|
||||
"github.com/influxdata/telegraf/metric"
|
||||
common_aws "github.com/influxdata/telegraf/plugins/common/aws"
|
||||
"github.com/influxdata/telegraf/plugins/common/proxy"
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
)
|
||||
|
||||
func TestSnakeCase(t *testing.T) {
|
||||
require.Equal(t, "cluster_name", snakeCase("Cluster Name"))
|
||||
require.Equal(t, "broker_id", snakeCase("Broker ID"))
|
||||
}
|
||||
|
||||
func TestGather(t *testing.T) {
|
||||
plugin := &CloudWatch{
|
||||
CredentialConfig: common_aws.CredentialConfig{
|
||||
Region: "us-east-1",
|
||||
},
|
||||
Namespace: "AWS/ELB",
|
||||
Delay: config.Duration(1 * time.Minute),
|
||||
Period: config.Duration(1 * time.Minute),
|
||||
RateLimit: 200,
|
||||
BatchSize: 500,
|
||||
Log: testutil.Logger{},
|
||||
}
|
||||
require.NoError(t, plugin.Init())
|
||||
plugin.client = defaultMockClient("AWS/ELB")
|
||||
|
||||
var acc testutil.Accumulator
|
||||
require.NoError(t, acc.GatherError(plugin.Gather))
|
||||
|
||||
expected := []telegraf.Metric{
|
||||
metric.New(
|
||||
"cloudwatch_aws_elb",
|
||||
map[string]string{
|
||||
"region": "us-east-1",
|
||||
"load_balancer_name": "p-example1",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"latency_minimum": 0.1,
|
||||
"latency_maximum": 0.3,
|
||||
"latency_average": 0.2,
|
||||
"latency_sum": 123.0,
|
||||
"latency_sample_count": 100.0,
|
||||
},
|
||||
time.Unix(0, 0),
|
||||
),
|
||||
metric.New(
|
||||
"cloudwatch_aws_elb",
|
||||
map[string]string{
|
||||
"region": "us-east-1",
|
||||
"load_balancer_name": "p-example2",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"latency_minimum": 0.1,
|
||||
"latency_maximum": 0.3,
|
||||
"latency_average": 0.2,
|
||||
"latency_sum": 124.0,
|
||||
"latency_sample_count": 100.0,
|
||||
},
|
||||
time.Unix(0, 0),
|
||||
),
|
||||
}
|
||||
|
||||
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
|
||||
}
|
||||
|
||||
func TestGatherDenseMetric(t *testing.T) {
|
||||
plugin := &CloudWatch{
|
||||
CredentialConfig: common_aws.CredentialConfig{
|
||||
Region: "us-east-1",
|
||||
},
|
||||
Namespace: "AWS/ELB",
|
||||
Delay: config.Duration(1 * time.Minute),
|
||||
Period: config.Duration(1 * time.Minute),
|
||||
RateLimit: 200,
|
||||
BatchSize: 500,
|
||||
MetricFormat: "dense",
|
||||
Log: testutil.Logger{},
|
||||
}
|
||||
require.NoError(t, plugin.Init())
|
||||
plugin.client = defaultMockClient("AWS/ELB")
|
||||
|
||||
var acc testutil.Accumulator
|
||||
require.NoError(t, acc.GatherError(plugin.Gather))
|
||||
|
||||
expected := []telegraf.Metric{
|
||||
metric.New(
|
||||
"cloudwatch_aws_elb",
|
||||
map[string]string{
|
||||
"region": "us-east-1",
|
||||
"load_balancer_name": "p-example1",
|
||||
"metric_name": "latency",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"minimum": 0.1,
|
||||
"maximum": 0.3,
|
||||
"average": 0.2,
|
||||
"sum": 123.0,
|
||||
"sample_count": 100.0,
|
||||
},
|
||||
time.Unix(0, 0),
|
||||
),
|
||||
metric.New(
|
||||
"cloudwatch_aws_elb",
|
||||
map[string]string{
|
||||
"region": "us-east-1",
|
||||
"load_balancer_name": "p-example2",
|
||||
"metric_name": "latency",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"minimum": 0.1,
|
||||
"maximum": 0.3,
|
||||
"average": 0.2,
|
||||
"sum": 124.0,
|
||||
"sample_count": 100.0,
|
||||
},
|
||||
time.Unix(0, 0),
|
||||
),
|
||||
}
|
||||
|
||||
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
|
||||
}
|
||||
|
||||
func TestMultiAccountGather(t *testing.T) {
|
||||
plugin := &CloudWatch{
|
||||
CredentialConfig: common_aws.CredentialConfig{
|
||||
Region: "us-east-1",
|
||||
},
|
||||
Namespace: "AWS/ELB",
|
||||
Delay: config.Duration(1 * time.Minute),
|
||||
Period: config.Duration(1 * time.Minute),
|
||||
RateLimit: 200,
|
||||
BatchSize: 500,
|
||||
Log: testutil.Logger{},
|
||||
IncludeLinkedAccounts: true,
|
||||
}
|
||||
require.NoError(t, plugin.Init())
|
||||
plugin.client = defaultMockClient("AWS/ELB")
|
||||
|
||||
var acc testutil.Accumulator
|
||||
require.NoError(t, acc.GatherError(plugin.Gather))
|
||||
|
||||
expected := []telegraf.Metric{
|
||||
metric.New(
|
||||
"cloudwatch_aws_elb",
|
||||
map[string]string{
|
||||
"region": "us-east-1",
|
||||
"load_balancer_name": "p-example1",
|
||||
"account": "123456789012",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"latency_minimum": 0.1,
|
||||
"latency_maximum": 0.3,
|
||||
"latency_average": 0.2,
|
||||
"latency_sum": 123.0,
|
||||
"latency_sample_count": 100.0,
|
||||
},
|
||||
time.Unix(0, 0),
|
||||
),
|
||||
metric.New(
|
||||
"cloudwatch_aws_elb",
|
||||
map[string]string{
|
||||
"region": "us-east-1",
|
||||
"load_balancer_name": "p-example2",
|
||||
"account": "923456789017",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"latency_minimum": 0.1,
|
||||
"latency_maximum": 0.3,
|
||||
"latency_average": 0.2,
|
||||
"latency_sum": 124.0,
|
||||
"latency_sample_count": 100.0,
|
||||
},
|
||||
time.Unix(0, 0),
|
||||
),
|
||||
}
|
||||
|
||||
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
|
||||
}
|
||||
|
||||
func TestGatherMultipleNamespaces(t *testing.T) {
|
||||
plugin := &CloudWatch{
|
||||
CredentialConfig: common_aws.CredentialConfig{
|
||||
Region: "us-east-1",
|
||||
},
|
||||
Namespaces: []string{"AWS/ELB", "AWS/EC2"},
|
||||
Delay: config.Duration(1 * time.Minute),
|
||||
Period: config.Duration(1 * time.Minute),
|
||||
RateLimit: 200,
|
||||
BatchSize: 500,
|
||||
Log: testutil.Logger{},
|
||||
}
|
||||
require.NoError(t, plugin.Init())
|
||||
plugin.client = defaultMockClient("AWS/ELB", "AWS/EC2")
|
||||
|
||||
var acc testutil.Accumulator
|
||||
require.NoError(t, acc.GatherError(plugin.Gather))
|
||||
|
||||
expected := []telegraf.Metric{
|
||||
metric.New(
|
||||
"cloudwatch_aws_elb",
|
||||
map[string]string{
|
||||
"region": "us-east-1",
|
||||
"load_balancer_name": "p-example1",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"latency_minimum": 0.1,
|
||||
"latency_maximum": 0.3,
|
||||
"latency_average": 0.2,
|
||||
"latency_sum": 123.0,
|
||||
"latency_sample_count": 100.0,
|
||||
},
|
||||
time.Unix(0, 0),
|
||||
),
|
||||
metric.New(
|
||||
"cloudwatch_aws_elb",
|
||||
map[string]string{
|
||||
"region": "us-east-1",
|
||||
"load_balancer_name": "p-example2",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"latency_minimum": 0.1,
|
||||
"latency_maximum": 0.3,
|
||||
"latency_average": 0.2,
|
||||
"latency_sum": 124.0,
|
||||
"latency_sample_count": 100.0,
|
||||
},
|
||||
time.Unix(0, 0),
|
||||
),
|
||||
metric.New(
|
||||
"cloudwatch_aws_ec2",
|
||||
map[string]string{
|
||||
"region": "us-east-1",
|
||||
"load_balancer_name": "p-example1",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"latency_minimum": 0.1,
|
||||
"latency_maximum": 0.3,
|
||||
"latency_average": 0.2,
|
||||
"latency_sum": 123.0,
|
||||
"latency_sample_count": 100.0,
|
||||
},
|
||||
time.Unix(0, 0),
|
||||
),
|
||||
metric.New(
|
||||
"cloudwatch_aws_ec2",
|
||||
map[string]string{
|
||||
"region": "us-east-1",
|
||||
"load_balancer_name": "p-example2",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"latency_minimum": 0.1,
|
||||
"latency_maximum": 0.3,
|
||||
"latency_average": 0.2,
|
||||
"latency_sum": 124.0,
|
||||
"latency_sample_count": 100.0,
|
||||
},
|
||||
time.Unix(0, 0),
|
||||
),
|
||||
}
|
||||
|
||||
option := []cmp.Option{
|
||||
testutil.IgnoreTime(),
|
||||
testutil.SortMetrics(),
|
||||
}
|
||||
|
||||
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), option...)
|
||||
}
|
||||
|
||||
func TestSelectMetrics(t *testing.T) {
|
||||
plugin := &CloudWatch{
|
||||
CredentialConfig: common_aws.CredentialConfig{
|
||||
Region: "us-east-1",
|
||||
},
|
||||
Namespace: "AWS/ELB",
|
||||
Delay: config.Duration(1 * time.Minute),
|
||||
Period: config.Duration(1 * time.Minute),
|
||||
RateLimit: 200,
|
||||
BatchSize: 500,
|
||||
Metrics: []*cloudwatchMetric{
|
||||
{
|
||||
MetricNames: []string{"Latency", "RequestCount"},
|
||||
Dimensions: []*dimension{
|
||||
{
|
||||
Name: "LoadBalancerName",
|
||||
Value: "lb*",
|
||||
},
|
||||
{
|
||||
Name: "AvailabilityZone",
|
||||
Value: "us-east*",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Log: testutil.Logger{},
|
||||
}
|
||||
require.NoError(t, plugin.Init())
|
||||
plugin.client = selectedMockClient()
|
||||
filtered, err := plugin.getFilteredMetrics()
|
||||
require.NoError(t, err)
|
||||
|
||||
// We've asked for 2 (out of 4) metrics, over all 3 load balancers in all 2
|
||||
// AZs. We should get 12 metrics.
|
||||
require.Len(t, filtered[0].metrics, 12)
|
||||
}
|
||||
|
||||
func TestSelectMetricsSummaryOnly(t *testing.T) {
|
||||
plugin := &CloudWatch{
|
||||
CredentialConfig: common_aws.CredentialConfig{
|
||||
Region: "us-east-1",
|
||||
},
|
||||
Namespace: "AWS/ELB",
|
||||
Delay: config.Duration(1 * time.Minute),
|
||||
Period: config.Duration(1 * time.Minute),
|
||||
RateLimit: 200,
|
||||
BatchSize: 500,
|
||||
Metrics: []*cloudwatchMetric{
|
||||
{
|
||||
MetricNames: []string{"Latency", "RequestCount"},
|
||||
Dimensions: []*dimension{
|
||||
{
|
||||
Name: "LoadBalancerName",
|
||||
Value: "lb*",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Log: testutil.Logger{},
|
||||
}
|
||||
require.NoError(t, plugin.Init())
|
||||
plugin.client = selectedMockClient()
|
||||
filtered, err := plugin.getFilteredMetrics()
|
||||
require.NoError(t, err)
|
||||
|
||||
// We've asked for the non-AU specific metrics only so this should be
|
||||
// 2 (out of 4) metrics for all 3 load balancers but no AZ.
|
||||
require.Len(t, filtered[0].metrics, 6)
|
||||
}
|
||||
|
||||
func TestGenerateStatisticsInputParams(t *testing.T) {
|
||||
d := types.Dimension{
|
||||
Name: aws.String("LoadBalancerName"),
|
||||
Value: aws.String("p-example"),
|
||||
}
|
||||
|
||||
namespace := "AWS/ELB"
|
||||
m := types.Metric{
|
||||
MetricName: aws.String("Latency"),
|
||||
Dimensions: []types.Dimension{d},
|
||||
Namespace: aws.String(namespace),
|
||||
}
|
||||
|
||||
plugin := &CloudWatch{
|
||||
Namespaces: []string{namespace},
|
||||
Delay: config.Duration(1 * time.Minute),
|
||||
Period: config.Duration(1 * time.Minute),
|
||||
BatchSize: 500,
|
||||
Log: testutil.Logger{},
|
||||
}
|
||||
require.NoError(t, plugin.Init())
|
||||
|
||||
now := time.Now()
|
||||
|
||||
plugin.updateWindow(now)
|
||||
|
||||
statFilter, err := filter.NewIncludeExcludeFilter(nil, nil)
|
||||
require.NoError(t, err)
|
||||
queries := plugin.getDataQueries([]filteredMetric{{metrics: []types.Metric{m}, statFilter: statFilter}})
|
||||
params := &cloudwatch.GetMetricDataInput{
|
||||
StartTime: aws.Time(plugin.windowStart),
|
||||
EndTime: aws.Time(plugin.windowEnd),
|
||||
MetricDataQueries: queries[namespace],
|
||||
}
|
||||
|
||||
require.EqualValues(t, *params.EndTime, now.Add(-time.Duration(plugin.Delay)))
|
||||
require.EqualValues(t, *params.StartTime, now.Add(-time.Duration(plugin.Period)).Add(-time.Duration(plugin.Delay)))
|
||||
require.Len(t, params.MetricDataQueries, 5)
|
||||
require.Len(t, params.MetricDataQueries[0].MetricStat.Metric.Dimensions, 1)
|
||||
require.EqualValues(t, 60, *params.MetricDataQueries[0].MetricStat.Period)
|
||||
}
|
||||
|
||||
func TestGenerateStatisticsInputParamsFiltered(t *testing.T) {
|
||||
d := types.Dimension{
|
||||
Name: aws.String("LoadBalancerName"),
|
||||
Value: aws.String("p-example"),
|
||||
}
|
||||
|
||||
namespace := "AWS/ELB"
|
||||
m := types.Metric{
|
||||
MetricName: aws.String("Latency"),
|
||||
Dimensions: []types.Dimension{d},
|
||||
Namespace: aws.String(namespace),
|
||||
}
|
||||
|
||||
plugin := &CloudWatch{
|
||||
Namespaces: []string{namespace},
|
||||
Delay: config.Duration(1 * time.Minute),
|
||||
Period: config.Duration(1 * time.Minute),
|
||||
BatchSize: 500,
|
||||
Log: testutil.Logger{},
|
||||
}
|
||||
require.NoError(t, plugin.Init())
|
||||
|
||||
now := time.Now()
|
||||
|
||||
plugin.updateWindow(now)
|
||||
|
||||
statFilter, err := filter.NewIncludeExcludeFilter([]string{"average", "sample_count"}, nil)
|
||||
require.NoError(t, err)
|
||||
queries := plugin.getDataQueries([]filteredMetric{{metrics: []types.Metric{m}, statFilter: statFilter}})
|
||||
params := &cloudwatch.GetMetricDataInput{
|
||||
StartTime: aws.Time(plugin.windowStart),
|
||||
EndTime: aws.Time(plugin.windowEnd),
|
||||
MetricDataQueries: queries[namespace],
|
||||
}
|
||||
|
||||
require.EqualValues(t, *params.EndTime, now.Add(-time.Duration(plugin.Delay)))
|
||||
require.EqualValues(t, *params.StartTime, now.Add(-time.Duration(plugin.Period)).Add(-time.Duration(plugin.Delay)))
|
||||
require.Len(t, params.MetricDataQueries, 2)
|
||||
require.Len(t, params.MetricDataQueries[0].MetricStat.Metric.Dimensions, 1)
|
||||
require.EqualValues(t, 60, *params.MetricDataQueries[0].MetricStat.Period)
|
||||
}
|
||||
|
||||
func TestMetricsCacheTimeout(t *testing.T) {
|
||||
cache := &metricCache{
|
||||
metrics: make([]filteredMetric, 0),
|
||||
built: time.Now(),
|
||||
ttl: time.Minute,
|
||||
}
|
||||
|
||||
require.True(t, cache.metrics != nil && time.Since(cache.built) < cache.ttl)
|
||||
cache.built = time.Now().Add(-time.Minute)
|
||||
require.False(t, cache.metrics != nil && time.Since(cache.built) < cache.ttl)
|
||||
}
|
||||
|
||||
func TestUpdateWindow(t *testing.T) {
|
||||
plugin := &CloudWatch{
|
||||
Namespace: "AWS/ELB",
|
||||
Delay: config.Duration(1 * time.Minute),
|
||||
Period: config.Duration(1 * time.Minute),
|
||||
BatchSize: 500,
|
||||
Log: testutil.Logger{},
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
|
||||
require.True(t, plugin.windowEnd.IsZero())
|
||||
require.True(t, plugin.windowStart.IsZero())
|
||||
|
||||
plugin.updateWindow(now)
|
||||
|
||||
newStartTime := plugin.windowEnd
|
||||
|
||||
// initial window just has a single period
|
||||
require.EqualValues(t, plugin.windowEnd, now.Add(-time.Duration(plugin.Delay)))
|
||||
require.EqualValues(t, plugin.windowStart, now.Add(-time.Duration(plugin.Delay)).Add(-time.Duration(plugin.Period)))
|
||||
|
||||
now = time.Now()
|
||||
plugin.updateWindow(now)
|
||||
|
||||
// subsequent window uses previous end time as start time
|
||||
require.EqualValues(t, plugin.windowEnd, now.Add(-time.Duration(plugin.Delay)))
|
||||
require.EqualValues(t, plugin.windowStart, newStartTime)
|
||||
}
|
||||
|
||||
func TestProxyFunction(t *testing.T) {
|
||||
proxyCfg := proxy.HTTPProxy{HTTPProxyURL: "http://www.penguins.com"}
|
||||
|
||||
proxyFunction, err := proxyCfg.Proxy()
|
||||
require.NoError(t, err)
|
||||
|
||||
u, err := url.Parse("https://monitoring.us-west-1.amazonaws.com/")
|
||||
require.NoError(t, err)
|
||||
|
||||
proxyResult, err := proxyFunction(&http.Request{URL: u})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "www.penguins.com", proxyResult.Host)
|
||||
}
|
||||
|
||||
func TestCombineNamespaces(t *testing.T) {
|
||||
plugin := &CloudWatch{
|
||||
Namespace: "AWS/ELB",
|
||||
Namespaces: []string{"AWS/EC2", "AWS/Billing"},
|
||||
BatchSize: 500,
|
||||
Log: testutil.Logger{},
|
||||
}
|
||||
|
||||
require.NoError(t, plugin.Init())
|
||||
require.Equal(t, []string{"AWS/EC2", "AWS/Billing", "AWS/ELB"}, plugin.Namespaces)
|
||||
}
|
||||
|
||||
// INTERNAL mock client implementation
|
||||
type mockClient struct {
|
||||
metrics []types.Metric
|
||||
}
|
||||
|
||||
func defaultMockClient(namespaces ...string) *mockClient {
|
||||
c := &mockClient{
|
||||
metrics: make([]types.Metric, 0, len(namespaces)),
|
||||
}
|
||||
|
||||
for _, namespace := range namespaces {
|
||||
c.metrics = append(c.metrics,
|
||||
types.Metric{
|
||||
Namespace: aws.String(namespace),
|
||||
MetricName: aws.String("Latency"),
|
||||
Dimensions: []types.Dimension{
|
||||
{
|
||||
Name: aws.String("LoadBalancerName"),
|
||||
Value: aws.String("p-example1"),
|
||||
},
|
||||
},
|
||||
},
|
||||
types.Metric{
|
||||
Namespace: aws.String(namespace),
|
||||
MetricName: aws.String("Latency"),
|
||||
Dimensions: []types.Dimension{
|
||||
{
|
||||
Name: aws.String("LoadBalancerName"),
|
||||
Value: aws.String("p-example2"),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
func selectedMockClient() *mockClient {
|
||||
c := &mockClient{
|
||||
metrics: make([]types.Metric, 0, 4*3*2),
|
||||
}
|
||||
// 4 metrics for 3 ELBs in 2 AZs
|
||||
for _, m := range []string{"Latency", "RequestCount", "HealthyHostCount", "UnHealthyHostCount"} {
|
||||
for _, lb := range []string{"lb-1", "lb-2", "lb-3"} {
|
||||
// For each metric/ELB pair, we get an aggregate value across all AZs.
|
||||
c.metrics = append(c.metrics, types.Metric{
|
||||
Namespace: aws.String("AWS/ELB"),
|
||||
MetricName: aws.String(m),
|
||||
Dimensions: []types.Dimension{
|
||||
{
|
||||
Name: aws.String("LoadBalancerName"),
|
||||
Value: aws.String(lb),
|
||||
},
|
||||
},
|
||||
})
|
||||
for _, az := range []string{"us-east-1a", "us-east-1b"} {
|
||||
// We get a metric for each metric/ELB/AZ triplet.
|
||||
c.metrics = append(c.metrics, types.Metric{
|
||||
Namespace: aws.String("AWS/ELB"),
|
||||
MetricName: aws.String(m),
|
||||
Dimensions: []types.Dimension{
|
||||
{
|
||||
Name: aws.String("LoadBalancerName"),
|
||||
Value: aws.String(lb),
|
||||
},
|
||||
{
|
||||
Name: aws.String("AvailabilityZone"),
|
||||
Value: aws.String(az),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *mockClient) ListMetrics(
|
||||
_ context.Context,
|
||||
params *cloudwatch.ListMetricsInput,
|
||||
_ ...func(*cloudwatch.Options),
|
||||
) (*cloudwatch.ListMetricsOutput, error) {
|
||||
response := &cloudwatch.ListMetricsOutput{
|
||||
Metrics: c.metrics,
|
||||
}
|
||||
|
||||
if params.IncludeLinkedAccounts != nil && *params.IncludeLinkedAccounts {
|
||||
response.OwningAccounts = []string{"123456789012", "923456789017"}
|
||||
}
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (*mockClient) GetMetricData(
|
||||
_ context.Context,
|
||||
params *cloudwatch.GetMetricDataInput,
|
||||
_ ...func(*cloudwatch.Options),
|
||||
) (*cloudwatch.GetMetricDataOutput, error) {
|
||||
return &cloudwatch.GetMetricDataOutput{
|
||||
MetricDataResults: []types.MetricDataResult{
|
||||
{
|
||||
Id: aws.String("minimum_0_0"),
|
||||
Label: aws.String("latency_minimum"),
|
||||
StatusCode: types.StatusCodeComplete,
|
||||
Timestamps: []time.Time{
|
||||
*params.EndTime,
|
||||
},
|
||||
Values: []float64{0.1},
|
||||
},
|
||||
{
|
||||
Id: aws.String("maximum_0_0"),
|
||||
Label: aws.String("latency_maximum"),
|
||||
StatusCode: types.StatusCodeComplete,
|
||||
Timestamps: []time.Time{
|
||||
*params.EndTime,
|
||||
},
|
||||
Values: []float64{0.3},
|
||||
},
|
||||
{
|
||||
Id: aws.String("average_0_0"),
|
||||
Label: aws.String("latency_average"),
|
||||
StatusCode: types.StatusCodeComplete,
|
||||
Timestamps: []time.Time{
|
||||
*params.EndTime,
|
||||
},
|
||||
Values: []float64{0.2},
|
||||
},
|
||||
{
|
||||
Id: aws.String("sum_0_0"),
|
||||
Label: aws.String("latency_sum"),
|
||||
StatusCode: types.StatusCodeComplete,
|
||||
Timestamps: []time.Time{
|
||||
*params.EndTime,
|
||||
},
|
||||
Values: []float64{123},
|
||||
},
|
||||
{
|
||||
Id: aws.String("sample_count_0_0"),
|
||||
Label: aws.String("latency_sample_count"),
|
||||
StatusCode: types.StatusCodeComplete,
|
||||
Timestamps: []time.Time{
|
||||
*params.EndTime,
|
||||
},
|
||||
Values: []float64{100},
|
||||
},
|
||||
{
|
||||
Id: aws.String("minimum_1_0"),
|
||||
Label: aws.String("latency_minimum"),
|
||||
StatusCode: types.StatusCodeComplete,
|
||||
Timestamps: []time.Time{
|
||||
*params.EndTime,
|
||||
},
|
||||
Values: []float64{0.1},
|
||||
},
|
||||
{
|
||||
Id: aws.String("maximum_1_0"),
|
||||
Label: aws.String("latency_maximum"),
|
||||
StatusCode: types.StatusCodeComplete,
|
||||
Timestamps: []time.Time{
|
||||
*params.EndTime,
|
||||
},
|
||||
Values: []float64{0.3},
|
||||
},
|
||||
{
|
||||
Id: aws.String("average_1_0"),
|
||||
Label: aws.String("latency_average"),
|
||||
StatusCode: types.StatusCodeComplete,
|
||||
Timestamps: []time.Time{
|
||||
*params.EndTime,
|
||||
},
|
||||
Values: []float64{0.2},
|
||||
},
|
||||
{
|
||||
Id: aws.String("sum_1_0"),
|
||||
Label: aws.String("latency_sum"),
|
||||
StatusCode: types.StatusCodeComplete,
|
||||
Timestamps: []time.Time{
|
||||
*params.EndTime,
|
||||
},
|
||||
Values: []float64{124},
|
||||
},
|
||||
{
|
||||
Id: aws.String("sample_count_1_0"),
|
||||
Label: aws.String("latency_sample_count"),
|
||||
StatusCode: types.StatusCodeComplete,
|
||||
Timestamps: []time.Time{
|
||||
*params.EndTime,
|
||||
},
|
||||
Values: []float64{100},
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
}

plugins/inputs/cloudwatch/sample.conf (new file)
@@ -0,0 +1,120 @@
# Pull Metric Statistics from Amazon CloudWatch
[[inputs.cloudwatch]]
  ## Amazon Region
  region = "us-east-1"

  ## Amazon Credentials
  ## Credentials are loaded in the following order
  ## 1) Web identity provider credentials via STS if role_arn and
  ##    web_identity_token_file are specified
  ## 2) Assumed credentials via STS if role_arn is specified
  ## 3) explicit credentials from 'access_key' and 'secret_key'
  ## 4) shared profile from 'profile'
  ## 5) environment variables
  ## 6) shared credentials file
  ## 7) EC2 Instance Profile
  # access_key = ""
  # secret_key = ""
  # token = ""
  # role_arn = ""
  # web_identity_token_file = ""
  # role_session_name = ""
  # profile = ""
  # shared_credential_file = ""

  ## If you are using CloudWatch cross-account observability, you can
  ## set IncludeLinkedAccounts to true in a monitoring account
  ## and collect metrics from the linked source accounts
  # include_linked_accounts = false

  ## Endpoint to make request against, the correct endpoint is automatically
  ## determined and this option should only be set if you wish to override the
  ## default.
  ## ex: endpoint_url = "http://localhost:8000"
  # endpoint_url = ""

  ## Set http_proxy
  # use_system_proxy = false
  # http_proxy_url = "http://localhost:8888"

  ## The minimum period for Cloudwatch metrics is 1 minute (60s). However not
  ## all metrics are made available to the 1 minute period. Some are collected
  ## at 3 minute, 5 minute, or larger intervals.
  ## See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
  ## Note that if a period is configured that is smaller than the minimum for a
  ## particular metric, that metric will not be returned by the Cloudwatch API
  ## and will not be collected by Telegraf.
  #
  ## Requested CloudWatch aggregation Period (required)
  ## Must be a multiple of 60s.
  period = "5m"

  ## Collection Delay (required)
  ## Must account for metrics availability via CloudWatch API
  delay = "5m"

  ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
  ## gaps or overlap in pulled data
  interval = "5m"

  ## Recommended if "delay" and "period" are both within 3 hours of request
  ## time. Invalid values will be ignored. Recently Active feature will only
  ## poll for CloudWatch ListMetrics values that occurred within the last 3h.
  ## If enabled, it will reduce total API usage of the CloudWatch ListMetrics
  ## API and require less memory to retain.
  ## Do not enable if "period" or "delay" is longer than 3 hours, as it will
  ## not return data more than 3 hours old.
  ## See https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_ListMetrics.html
  # recently_active = "PT3H"

  ## Configure the TTL for the internal cache of metrics.
  # cache_ttl = "1h"

  ## Metric Statistic Namespaces, wildcards are allowed
  # namespaces = ["*"]

  ## Metric Format
  ## This determines the format of the produced metrics. 'sparse', the default,
  ## will produce a unique field for each statistic. 'dense' will report all
  ## statistics in a field called value and have a metric_name tag
  ## defining the name of the statistic. See the plugin README for examples.
  # metric_format = "sparse"

  ## Maximum requests per second. Note that the global default AWS rate limit
  ## is 50 reqs/sec, so if you define multiple namespaces, these should add up
  ## to a maximum of 50.
  ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html
  # ratelimit = 25

  ## Timeout for http requests made by the cloudwatch client.
  # timeout = "5s"

  ## Batch Size
  ## The size of each batch to send requests to Cloudwatch. 500 is the
  ## suggested largest size. If a request gets too large (413 errors), consider
  ## reducing this amount.
  # batch_size = 500

  ## Namespace-wide statistic filters. These allow fewer queries to be made to
  ## cloudwatch.
  # statistic_include = ["average", "sum", "minimum", "maximum", "sample_count"]
  # statistic_exclude = []

  ## Metrics to Pull
  ## Defaults to all Metrics in Namespace if nothing is provided
  ## Refreshes Namespace available metrics every 1h
  #[[inputs.cloudwatch.metrics]]
  # names = ["Latency", "RequestCount"]
  #
  # ## Statistic filters for Metric. These allow for retrieving specific
  # ## statistics for an individual metric.
  # # statistic_include = ["average", "sum", "minimum", "maximum", "sample_count"]
  # # statistic_exclude = []
  #
  # ## Dimension filters for Metric.
  # ## All dimensions defined for the metric names must be specified in order
  # ## to retrieve the metric statistics.
  # ## 'value' has wildcard / 'glob' matching support such as 'p-*'.
  # [[inputs.cloudwatch.metrics.dimensions]]
  #   name = "LoadBalancerName"
  #   value = "p-example"

plugins/inputs/cloudwatch/utils.go (new file)
@@ -0,0 +1,48 @@
package cloudwatch

import (
	"slices"
	"strings"

	"github.com/aws/aws-sdk-go-v2/service/cloudwatch/types"

	"github.com/influxdata/telegraf/internal"
)

func metricMatch(cm *cloudwatchMetric, m types.Metric) bool {
	if !slices.Contains(cm.MetricNames, *m.MetricName) {
		return false
	}
	// Dimensions need to match completely so exit early if the length mismatches
	if len(cm.Dimensions) != len(m.Dimensions) {
		return false
	}
	// Sort the dimensions for efficient comparison
	slices.SortStableFunc(m.Dimensions, func(a, b types.Dimension) int {
		return strings.Compare(*a.Name, *b.Name)
	})
	return slices.EqualFunc(cm.Dimensions, m.Dimensions, func(rd *dimension, vd types.Dimension) bool {
		return rd.Name == *vd.Name && (rd.valueMatcher == nil || rd.valueMatcher.Match(*vd.Value))
	})
}

func sanitizeMeasurement(namespace string) string {
	namespace = strings.ReplaceAll(namespace, "/", "_")
	namespace = snakeCase(namespace)
	return "cloudwatch_" + namespace
}

func snakeCase(s string) string {
	s = internal.SnakeCase(s)
	s = strings.ReplaceAll(s, " ", "_")
	s = strings.ReplaceAll(s, "__", "_")
	return s
}

func ctod(cDimensions []types.Dimension) *map[string]string {
	dimensions := make(map[string]string, len(cDimensions))
	for i := range cDimensions {
		dimensions[snakeCase(*cDimensions[i].Name)] = *cDimensions[i].Value
	}
	return &dimensions
}