
Adding upstream version 1.34.4.

Signed-off-by: Daniel Baumann <daniel@debian.org>
Daniel Baumann 2025-05-24 07:26:29 +02:00
parent e393c3af3f
commit 4978089aab
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
4963 changed files with 677545 additions and 0 deletions


@@ -0,0 +1,100 @@
# ActiveMQ Input Plugin
This plugin gathers metrics on queues, topics, and subscribers via the Console
API of the [ActiveMQ][activemq] message broker daemon.
⭐ Telegraf v1.8.0
🏷️ messaging
💻 all
[activemq]: https://activemq.apache.org/
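
For orientation, here is a minimal sketch of what the plugin does under the
hood: fetch a webadmin XML endpoint (here `queues.jsp`; the path and XML
attribute names follow the plugin source and tests below) and decode the
per-queue statistics. The URL and struct names are illustrative only.

```go
package main

import (
	"encoding/xml"
	"fmt"
	"io"
	"net/http"
)

// queuesDoc mirrors the <queues><queue name=".."><stats .../></queue></queues>
// document returned by the Console API.
type queuesDoc struct {
	Queues []struct {
		Name  string `xml:"name,attr"`
		Stats struct {
			Size          int `xml:"size,attr"`
			ConsumerCount int `xml:"consumerCount,attr"`
			EnqueueCount  int `xml:"enqueueCount,attr"`
			DequeueCount  int `xml:"dequeueCount,attr"`
		} `xml:"stats"`
	} `xml:"queue"`
}

func main() {
	// Default webadmin root path "admin", as in the sample configuration.
	resp, err := http.Get("http://127.0.0.1:8161/admin/xml/queues.jsp")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}

	var doc queuesDoc
	if err := xml.Unmarshal(body, &doc); err != nil {
		panic(err)
	}
	for _, q := range doc.Queues {
		fmt.Printf("queue=%q size=%d consumers=%d\n", q.Name, q.Stats.Size, q.Stats.ConsumerCount)
	}
}
```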
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used
to modify metrics, tags, and fields, or to create aliases and configure
ordering, etc.
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
## Configuration
```toml @sample.conf
# Gather ActiveMQ metrics
[[inputs.activemq]]
## ActiveMQ WebConsole URL
url = "http://127.0.0.1:8161"
## Credentials for basic HTTP authentication
# username = "admin"
# password = "admin"
## Required ActiveMQ webadmin root path
# webadmin = "admin"
## Maximum time to receive response.
# response_timeout = "5s"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
```
## Metrics
Every effort was made to preserve the names based on the XML response from the
ActiveMQ Console API.
- activemq_queues
- tags:
- name
- source
- port
- fields:
- size
- consumer_count
- enqueue_count
- dequeue_count
- activemq_topics
- tags:
- name
- source
- port
- fields:
- size
- consumer_count
- enqueue_count
- dequeue_count
- activemq_subscribers
- tags:
- client_id
- subscription_name
- connection_id
- destination_name
- selector
- active
- source
- port
- fields:
- pending_queue_size
- dispatched_queue_size
- dispatched_counter
- enqueue_counter
- dequeue_counter
## Example Output
```text
activemq_queues,name=sandra,host=88284b2fe51b,source=localhost,port=8161 consumer_count=0i,enqueue_count=0i,dequeue_count=0i,size=0i 1492610703000000000
activemq_queues,name=Test,host=88284b2fe51b,source=localhost,port=8161 dequeue_count=0i,size=0i,consumer_count=0i,enqueue_count=0i 1492610703000000000
activemq_topics,name=ActiveMQ.Advisory.MasterBroker\ ,host=88284b2fe51b,source=localhost,port=8161 size=0i,consumer_count=0i,enqueue_count=1i,dequeue_count=0i 1492610703000000000
activemq_topics,host=88284b2fe51b,name=AAA\ ,source=localhost,port=8161 size=0i,consumer_count=1i,enqueue_count=0i,dequeue_count=0i 1492610703000000000
activemq_topics,name=ActiveMQ.Advisory.Topic\ ,host=88284b2fe51b,source=localhost,port=8161 enqueue_count=1i,dequeue_count=0i,size=0i,consumer_count=0i 1492610703000000000
activemq_topics,name=ActiveMQ.Advisory.Queue\ ,host=88284b2fe51b,source=localhost,port=8161 size=0i,consumer_count=0i,enqueue_count=2i,dequeue_count=0i 1492610703000000000
activemq_topics,name=AAAA\ ,host=88284b2fe51b,source=localhost,port=8161 consumer_count=0i,enqueue_count=0i,dequeue_count=0i,size=0i 1492610703000000000
activemq_subscribers,connection_id=NOTSET,destination_name=AAA,source=localhost,port=8161,selector=AA,active=no,host=88284b2fe51b,client_id=AAA,subscription_name=AAA pending_queue_size=0i,dispatched_queue_size=0i,dispatched_counter=0i,enqueue_counter=0i,dequeue_counter=0i 1492610703000000000
```


@@ -0,0 +1,285 @@
//go:generate ../../../tools/readme_config_includer/generator
package activemq
import (
_ "embed"
"encoding/xml"
"fmt"
"io"
"net/http"
"net/url"
"path"
"strconv"
"strings"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
//go:embed sample.conf
var sampleConfig string
type ActiveMQ struct {
Server string `toml:"server" deprecated:"1.11.0;use 'url' instead"`
Port int `toml:"port" deprecated:"1.11.0;use 'url' instead"`
URL string `toml:"url"`
Username string `toml:"username"`
Password string `toml:"password"`
Webadmin string `toml:"webadmin"`
ResponseTimeout config.Duration `toml:"response_timeout"`
tls.ClientConfig
client *http.Client
baseURL *url.URL
}
type topics struct {
XMLName xml.Name `xml:"topics"`
TopicItems []topic `xml:"topic"`
}
type topic struct {
XMLName xml.Name `xml:"topic"`
Name string `xml:"name,attr"`
Stats stats `xml:"stats"`
}
type subscribers struct {
XMLName xml.Name `xml:"subscribers"`
SubscriberItems []subscriber `xml:"subscriber"`
}
type subscriber struct {
XMLName xml.Name `xml:"subscriber"`
ClientID string `xml:"clientId,attr"`
SubscriptionName string `xml:"subscriptionName,attr"`
ConnectionID string `xml:"connectionId,attr"`
DestinationName string `xml:"destinationName,attr"`
Selector string `xml:"selector,attr"`
Active string `xml:"active,attr"`
Stats stats `xml:"stats"`
}
type queues struct {
XMLName xml.Name `xml:"queues"`
QueueItems []queue `xml:"queue"`
}
type queue struct {
XMLName xml.Name `xml:"queue"`
Name string `xml:"name,attr"`
Stats stats `xml:"stats"`
}
type stats struct {
XMLName xml.Name `xml:"stats"`
Size int `xml:"size,attr"`
ConsumerCount int `xml:"consumerCount,attr"`
EnqueueCount int `xml:"enqueueCount,attr"`
DequeueCount int `xml:"dequeueCount,attr"`
PendingQueueSize int `xml:"pendingQueueSize,attr"`
DispatchedQueueSize int `xml:"dispatchedQueueSize,attr"`
DispatchedCounter int `xml:"dispatchedCounter,attr"`
EnqueueCounter int `xml:"enqueueCounter,attr"`
DequeueCounter int `xml:"dequeueCounter,attr"`
}
func (*ActiveMQ) SampleConfig() string {
return sampleConfig
}
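// Init validates the configured endpoint, falling back to the deprecated
// 'server'/'port' settings when no URL is set, and creates the HTTP client.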
func (a *ActiveMQ) Init() error {
if a.ResponseTimeout < config.Duration(time.Second) {
a.ResponseTimeout = config.Duration(time.Second * 5)
}
var err error
u := &url.URL{Scheme: "http", Host: a.Server + ":" + strconv.Itoa(a.Port)}
if a.URL != "" {
u, err = url.Parse(a.URL)
if err != nil {
return err
}
}
if !strings.HasPrefix(u.Scheme, "http") {
return fmt.Errorf("invalid scheme %q", u.Scheme)
}
if u.Hostname() == "" {
return fmt.Errorf("invalid hostname %q", u.Hostname())
}
a.baseURL = u
a.client, err = a.createHTTPClient()
if err != nil {
return err
}
return nil
}
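// Gather fetches the queues, topics and subscribers XML documents and
// emits one measurement per item found.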
func (a *ActiveMQ) Gather(acc telegraf.Accumulator) error {
dataQueues, err := a.getMetrics(a.queuesURL())
if err != nil {
return err
}
queues := queues{}
err = xml.Unmarshal(dataQueues, &queues)
if err != nil {
return fmt.Errorf("queues XML unmarshal error: %w", err)
}
dataTopics, err := a.getMetrics(a.topicsURL())
if err != nil {
return err
}
topics := topics{}
err = xml.Unmarshal(dataTopics, &topics)
if err != nil {
return fmt.Errorf("topics XML unmarshal error: %w", err)
}
dataSubscribers, err := a.getMetrics(a.subscribersURL())
if err != nil {
return err
}
subscribers := subscribers{}
err = xml.Unmarshal(dataSubscribers, &subscribers)
if err != nil {
return fmt.Errorf("subscribers XML unmarshal error: %w", err)
}
a.gatherQueuesMetrics(acc, queues)
a.gatherTopicsMetrics(acc, topics)
a.gatherSubscribersMetrics(acc, subscribers)
return nil
}
func (a *ActiveMQ) createHTTPClient() (*http.Client, error) {
tlsCfg, err := a.ClientConfig.TLSConfig()
if err != nil {
return nil, err
}
client := &http.Client{
Transport: &http.Transport{
TLSClientConfig: tlsCfg,
},
Timeout: time.Duration(a.ResponseTimeout),
}
return client, nil
}
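// getMetrics performs a GET request against the given endpoint, applying
// basic auth when credentials are configured, and returns the raw body.
// Any status other than 200 is treated as an error.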
func (a *ActiveMQ) getMetrics(u string) ([]byte, error) {
req, err := http.NewRequest("GET", u, nil)
if err != nil {
return nil, err
}
if a.Username != "" || a.Password != "" {
req.SetBasicAuth(a.Username, a.Password)
}
resp, err := a.client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("%s returned HTTP status %s", u, resp.Status)
}
return io.ReadAll(resp.Body)
}
func (a *ActiveMQ) gatherQueuesMetrics(acc telegraf.Accumulator, queues queues) {
for _, queue := range queues.QueueItems {
records := make(map[string]interface{})
tags := make(map[string]string)
tags["name"] = strings.TrimSpace(queue.Name)
tags["source"] = a.baseURL.Hostname()
tags["port"] = a.baseURL.Port()
records["size"] = queue.Stats.Size
records["consumer_count"] = queue.Stats.ConsumerCount
records["enqueue_count"] = queue.Stats.EnqueueCount
records["dequeue_count"] = queue.Stats.DequeueCount
acc.AddFields("activemq_queues", records, tags)
}
}
func (a *ActiveMQ) gatherTopicsMetrics(acc telegraf.Accumulator, topics topics) {
for _, topic := range topics.TopicItems {
records := make(map[string]interface{})
tags := make(map[string]string)
tags["name"] = topic.Name
tags["source"] = a.baseURL.Hostname()
tags["port"] = a.baseURL.Port()
records["size"] = topic.Stats.Size
records["consumer_count"] = topic.Stats.ConsumerCount
records["enqueue_count"] = topic.Stats.EnqueueCount
records["dequeue_count"] = topic.Stats.DequeueCount
acc.AddFields("activemq_topics", records, tags)
}
}
func (a *ActiveMQ) gatherSubscribersMetrics(acc telegraf.Accumulator, subscribers subscribers) {
for _, subscriber := range subscribers.SubscriberItems {
records := make(map[string]interface{})
tags := make(map[string]string)
tags["client_id"] = subscriber.ClientID
tags["subscription_name"] = subscriber.SubscriptionName
tags["connection_id"] = subscriber.ConnectionID
tags["destination_name"] = subscriber.DestinationName
tags["selector"] = subscriber.Selector
tags["active"] = subscriber.Active
tags["source"] = a.baseURL.Hostname()
tags["port"] = a.baseURL.Port()
records["pending_queue_size"] = subscriber.Stats.PendingQueueSize
records["dispatched_queue_size"] = subscriber.Stats.DispatchedQueueSize
records["dispatched_counter"] = subscriber.Stats.DispatchedCounter
records["enqueue_counter"] = subscriber.Stats.EnqueueCounter
records["dequeue_counter"] = subscriber.Stats.DequeueCounter
acc.AddFields("activemq_subscribers", records, tags)
}
}
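// The following helpers resolve the webadmin XML endpoints against the
// base URL, e.g. http://127.0.0.1:8161/admin/xml/queues.jsp with the
// sample configuration.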
func (a *ActiveMQ) queuesURL() string {
ref := url.URL{Path: path.Join("/", a.Webadmin, "/xml/queues.jsp")}
return a.baseURL.ResolveReference(&ref).String()
}
func (a *ActiveMQ) topicsURL() string {
ref := url.URL{Path: path.Join("/", a.Webadmin, "/xml/topics.jsp")}
return a.baseURL.ResolveReference(&ref).String()
}
func (a *ActiveMQ) subscribersURL() string {
ref := url.URL{Path: path.Join("/", a.Webadmin, "/xml/subscribers.jsp")}
return a.baseURL.ResolveReference(&ref).String()
}
func init() {
inputs.Add("activemq", func() telegraf.Input {
return &ActiveMQ{
Server: "localhost",
Port: 8161,
Webadmin: "admin",
}
})
}


@@ -0,0 +1,186 @@
package activemq
import (
"encoding/xml"
"net/http"
"net/http/httptest"
"testing"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf/testutil"
)
func TestGatherQueuesMetrics(t *testing.T) {
s := `<queues>
<queue name="sandra">
<stats size="0" consumerCount="0" enqueueCount="0" dequeueCount="0"/>
<feed>
<atom>queueBrowse/sandra?view=rss&amp;feedType=atom_1.0</atom>
<rss>queueBrowse/sandra?view=rss&amp;feedType=rss_2.0</rss>
</feed>
</queue>
<queue name="Test">
<stats size="0" consumerCount="0" enqueueCount="0" dequeueCount="0"/>
<feed>
<atom>queueBrowse/Test?view=rss&amp;feedType=atom_1.0</atom>
<rss>queueBrowse/Test?view=rss&amp;feedType=rss_2.0</rss>
</feed>
</queue>
</queues>`
queues := queues{}
require.NoError(t, xml.Unmarshal([]byte(s), &queues))
records := make(map[string]interface{})
tags := make(map[string]string)
tags["name"] = "Test"
tags["source"] = "localhost"
tags["port"] = "8161"
records["size"] = 0
records["consumer_count"] = 0
records["enqueue_count"] = 0
records["dequeue_count"] = 0
plugin := &ActiveMQ{
Server: "localhost",
Port: 8161,
}
require.NoError(t, plugin.Init())
var acc testutil.Accumulator
plugin.gatherQueuesMetrics(&acc, queues)
acc.AssertContainsTaggedFields(t, "activemq_queues", records, tags)
}
func TestGatherTopicsMetrics(t *testing.T) {
s := `<topics>
<topic name="ActiveMQ.Advisory.MasterBroker ">
<stats size="0" consumerCount="0" enqueueCount="1" dequeueCount="0"/>
</topic>
<topic name="AAA ">
<stats size="0" consumerCount="1" enqueueCount="0" dequeueCount="0"/>
</topic>
<topic name="ActiveMQ.Advisory.Topic ">
<stats size="0" consumerCount="0" enqueueCount="1" dequeueCount="0"/>
</topic>
<topic name="ActiveMQ.Advisory.Queue ">
<stats size="0" consumerCount="0" enqueueCount="2" dequeueCount="0"/>
</topic>
<topic name="AAAA ">
<stats size="0" consumerCount="0" enqueueCount="0" dequeueCount="0"/>
</topic>
</topics>`
topics := topics{}
require.NoError(t, xml.Unmarshal([]byte(s), &topics))
records := make(map[string]interface{})
tags := make(map[string]string)
tags["name"] = "ActiveMQ.Advisory.MasterBroker "
tags["source"] = "localhost"
tags["port"] = "8161"
records["size"] = 0
records["consumer_count"] = 0
records["enqueue_count"] = 1
records["dequeue_count"] = 0
plugin := &ActiveMQ{
Server: "localhost",
Port: 8161,
}
require.NoError(t, plugin.Init())
var acc testutil.Accumulator
plugin.gatherTopicsMetrics(&acc, topics)
acc.AssertContainsTaggedFields(t, "activemq_topics", records, tags)
}
func TestGatherSubscribersMetrics(t *testing.T) {
s := `<subscribers>
<subscriber clientId="AAA" subscriptionName="AAA" connectionId="NOTSET" destinationName="AAA" selector="AA" active="no">
<stats pendingQueueSize="0" dispatchedQueueSize="0" dispatchedCounter="0" enqueueCounter="0" dequeueCounter="0"/>
</subscriber>
</subscribers>`
subscribers := subscribers{}
require.NoError(t, xml.Unmarshal([]byte(s), &subscribers))
records := make(map[string]interface{})
tags := make(map[string]string)
tags["client_id"] = "AAA"
tags["subscription_name"] = "AAA"
tags["connection_id"] = "NOTSET"
tags["destination_name"] = "AAA"
tags["selector"] = "AA"
tags["active"] = "no"
tags["source"] = "localhost"
tags["port"] = "8161"
records["pending_queue_size"] = 0
records["dispatched_queue_size"] = 0
records["dispatched_counter"] = 0
records["enqueue_counter"] = 0
records["dequeue_counter"] = 0
plugin := &ActiveMQ{
Server: "localhost",
Port: 8161,
}
require.NoError(t, plugin.Init())
var acc testutil.Accumulator
plugin.gatherSubscribersMetrics(&acc, subscribers)
acc.AssertContainsTaggedFields(t, "activemq_subscribers", records, tags)
}
func TestURLs(t *testing.T) {
ts := httptest.NewServer(http.NotFoundHandler())
defer ts.Close()
ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch r.URL.Path {
case "/admin/xml/queues.jsp":
w.WriteHeader(http.StatusOK)
if _, err := w.Write([]byte("<queues></queues>")); err != nil {
w.WriteHeader(http.StatusInternalServerError)
t.Error(err)
return
}
case "/admin/xml/topics.jsp":
w.WriteHeader(http.StatusOK)
if _, err := w.Write([]byte("<topics></topics>")); err != nil {
w.WriteHeader(http.StatusInternalServerError)
t.Error(err)
return
}
case "/admin/xml/subscribers.jsp":
w.WriteHeader(http.StatusOK)
if _, err := w.Write([]byte("<subscribers></subscribers>")); err != nil {
w.WriteHeader(http.StatusInternalServerError)
t.Error(err)
return
}
default:
w.WriteHeader(http.StatusNotFound)
t.Fatalf("unexpected path: %s", r.URL.Path)
}
})
plugin := ActiveMQ{
URL: "http://" + ts.Listener.Addr().String(),
Webadmin: "admin",
}
require.NoError(t, plugin.Init())
var acc testutil.Accumulator
require.NoError(t, plugin.Gather(&acc))
require.Empty(t, acc.GetTelegrafMetrics())
}


@@ -0,0 +1,21 @@
# Gather ActiveMQ metrics
[[inputs.activemq]]
## ActiveMQ WebConsole URL
url = "http://127.0.0.1:8161"
## Credentials for basic HTTP authentication
# username = "admin"
# password = "admin"
## Required ActiveMQ webadmin root path
# webadmin = "admin"
## Maximum time to receive response.
# response_timeout = "5s"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false

File diff suppressed because one or more lines are too long


@@ -0,0 +1,457 @@
//go:generate ../../../tools/readme_config_includer/generator
package aerospike
import (
"crypto/tls"
_ "embed"
"fmt"
"math"
"strconv"
"strings"
"sync"
"time"
as "github.com/aerospike/aerospike-client-go/v5"
"github.com/influxdata/telegraf"
common_tls "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
//go:embed sample.conf
var sampleConfig string
type Aerospike struct {
Servers []string `toml:"servers"`
Username string `toml:"username"`
Password string `toml:"password"`
EnableTLS bool `toml:"enable_tls"`
EnableSSL bool `toml:"enable_ssl" deprecated:"1.7.0;1.35.0;use 'enable_tls' instead"`
TLSName string `toml:"tls_name"`
common_tls.ClientConfig
initialized bool
tlsConfig *tls.Config
DisableQueryNamespaces bool `toml:"disable_query_namespaces"`
Namespaces []string `toml:"namespaces"`
QuerySets bool `toml:"query_sets"`
Sets []string `toml:"sets"`
EnableTTLHistogram bool `toml:"enable_ttl_histogram"`
EnableObjectSizeLinearHistogram bool `toml:"enable_object_size_linear_histogram"`
NumberHistogramBuckets int `toml:"num_histogram_buckets"`
}
// protectedHexFields are fields that can contain hex values which, by
// chance, may be all digits; they must always be treated as strings.
var protectedHexFields = map[string]bool{
"node_name": true,
"cluster_key": true,
"paxos_principal": true,
}
func (*Aerospike) SampleConfig() string {
return sampleConfig
}
func (a *Aerospike) Gather(acc telegraf.Accumulator) error {
if !a.initialized {
tlsConfig, err := a.ClientConfig.TLSConfig()
if err != nil {
return err
}
if tlsConfig == nil && (a.EnableTLS || a.EnableSSL) {
tlsConfig = &tls.Config{}
}
a.tlsConfig = tlsConfig
a.initialized = true
}
if a.NumberHistogramBuckets == 0 {
a.NumberHistogramBuckets = 10
} else if a.NumberHistogramBuckets > 100 {
a.NumberHistogramBuckets = 100
} else if a.NumberHistogramBuckets < 1 {
a.NumberHistogramBuckets = 10
}
if len(a.Servers) == 0 {
return a.gatherServer(acc, "127.0.0.1:3000")
}
var wg sync.WaitGroup
wg.Add(len(a.Servers))
for _, server := range a.Servers {
go func(serv string) {
defer wg.Done()
acc.AddError(a.gatherServer(acc, serv))
}(server)
}
wg.Wait()
return nil
}
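// gatherServer connects to a single seed host and collects node,
// namespace, set and histogram statistics from every node in the cluster.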
func (a *Aerospike) gatherServer(acc telegraf.Accumulator, hostPort string) error {
policy := as.NewClientPolicy()
policy.User = a.Username
policy.Password = a.Password
policy.TlsConfig = a.tlsConfig
asHosts, err := as.NewHosts(hostPort)
if err != nil {
return err
}
if a.TLSName != "" && (a.EnableTLS || a.EnableSSL) {
for _, asHost := range asHosts {
asHost.TLSName = a.TLSName
}
}
c, err := as.NewClientWithPolicyAndHost(policy, asHosts...)
if err != nil {
return err
}
asInfoPolicy := as.NewInfoPolicy()
defer c.Close()
nodes := c.GetNodes()
for _, n := range nodes {
nodeHost := n.GetHost().String()
stats, err := getNodeInfo(n, asInfoPolicy)
if err != nil {
return err
}
parseNodeInfo(acc, stats, nodeHost, n.GetName())
namespaces, err := a.getNamespaces(n, asInfoPolicy)
if err != nil {
return err
}
if !a.DisableQueryNamespaces {
// Query Namespaces
for _, namespace := range namespaces {
stats, err = getNamespaceInfo(namespace, n, asInfoPolicy)
if err != nil {
continue
}
parseNamespaceInfo(acc, stats, nodeHost, namespace, n.GetName())
if a.EnableTTLHistogram {
err = a.getTTLHistogram(acc, nodeHost, namespace, "", n, asInfoPolicy)
if err != nil {
continue
}
}
if a.EnableObjectSizeLinearHistogram {
err = a.getObjectSizeLinearHistogram(acc, nodeHost, namespace, "", n, asInfoPolicy)
if err != nil {
continue
}
}
}
}
if a.QuerySets {
namespaceSets, err := a.getSets(n, asInfoPolicy)
if err == nil {
for _, namespaceSet := range namespaceSets {
namespace, set := splitNamespaceSet(namespaceSet)
stats, err := getSetInfo(namespaceSet, n, asInfoPolicy)
if err != nil {
continue
}
parseSetInfo(acc, stats, nodeHost, namespaceSet, n.GetName())
if a.EnableTTLHistogram {
err = a.getTTLHistogram(acc, nodeHost, namespace, set, n, asInfoPolicy)
if err != nil {
continue
}
}
if a.EnableObjectSizeLinearHistogram {
err = a.getObjectSizeLinearHistogram(acc, nodeHost, namespace, set, n, asInfoPolicy)
if err != nil {
continue
}
}
}
}
}
}
return nil
}
func getNodeInfo(n *as.Node, infoPolicy *as.InfoPolicy) (map[string]string, error) {
stats, err := n.RequestInfo(infoPolicy, "statistics")
if err != nil {
return nil, err
}
return stats, nil
}
func parseNodeInfo(acc telegraf.Accumulator, stats map[string]string, hostPort, nodeName string) {
nTags := map[string]string{
"aerospike_host": hostPort,
"node_name": nodeName,
}
nFields := make(map[string]interface{})
stat := strings.Split(stats["statistics"], ";")
for _, pair := range stat {
parts := strings.Split(pair, "=")
if len(parts) < 2 {
continue
}
key := strings.ReplaceAll(parts[0], "-", "_")
nFields[key] = parseAerospikeValue(key, parts[1])
}
acc.AddFields("aerospike_node", nFields, nTags, time.Now())
}
func (a *Aerospike) getNamespaces(n *as.Node, infoPolicy *as.InfoPolicy) ([]string, error) {
var namespaces []string
if len(a.Namespaces) == 0 {
info, err := n.RequestInfo(infoPolicy, "namespaces")
if err != nil {
return namespaces, err
}
namespaces = strings.Split(info["namespaces"], ";")
} else {
namespaces = a.Namespaces
}
return namespaces, nil
}
func getNamespaceInfo(namespace string, n *as.Node, infoPolicy *as.InfoPolicy) (map[string]string, error) {
stats, err := n.RequestInfo(infoPolicy, "namespace/"+namespace)
if err != nil {
return nil, err
}
return stats, err
}
func parseNamespaceInfo(acc telegraf.Accumulator, stats map[string]string, hostPort, namespace, nodeName string) {
nTags := map[string]string{
"aerospike_host": hostPort,
"node_name": nodeName,
}
nTags["namespace"] = namespace
nFields := make(map[string]interface{})
stat := strings.Split(stats["namespace/"+namespace], ";")
for _, pair := range stat {
parts := strings.Split(pair, "=")
if len(parts) < 2 {
continue
}
key := strings.ReplaceAll(parts[0], "-", "_")
nFields[key] = parseAerospikeValue(key, parts[1])
}
acc.AddFields("aerospike_namespace", nFields, nTags, time.Now())
}
func (a *Aerospike) getSets(n *as.Node, infoPolicy *as.InfoPolicy) ([]string, error) {
var namespaceSets []string
// Gather all sets
if len(a.Sets) == 0 {
stats, err := n.RequestInfo(infoPolicy, "sets")
if err != nil {
return namespaceSets, err
}
stat := strings.Split(stats["sets"], ";")
for _, setStats := range stat {
// setInfo is "ns=test:set=foo:objects=1:tombstones=0"
if len(setStats) > 0 {
pairs := strings.Split(setStats, ":")
var ns, set string
for _, pair := range pairs {
parts := strings.Split(pair, "=")
if len(parts) == 2 {
if parts[0] == "ns" {
ns = parts[1]
}
if parts[0] == "set" {
set = parts[1]
}
}
}
if len(ns) > 0 && len(set) > 0 {
namespaceSets = append(namespaceSets, fmt.Sprintf("%s/%s", ns, set))
}
}
}
} else { // User has passed in sets
namespaceSets = a.Sets
}
return namespaceSets, nil
}
func getSetInfo(namespaceSet string, n *as.Node, infoPolicy *as.InfoPolicy) (map[string]string, error) {
stats, err := n.RequestInfo(infoPolicy, "sets/"+namespaceSet)
if err != nil {
return nil, err
}
return stats, nil
}
func parseSetInfo(acc telegraf.Accumulator, stats map[string]string, hostPort, namespaceSet, nodeName string) {
stat := strings.Split(
strings.TrimSuffix(
stats["sets/"+namespaceSet], ";"), ":")
nTags := map[string]string{
"aerospike_host": hostPort,
"node_name": nodeName,
"set": namespaceSet,
}
nFields := make(map[string]interface{})
for _, part := range stat {
pieces := strings.Split(part, "=")
if len(pieces) < 2 {
continue
}
key := strings.ReplaceAll(pieces[0], "-", "_")
nFields[key] = parseAerospikeValue(key, pieces[1])
}
acc.AddFields("aerospike_set", nFields, nTags, time.Now())
}
func (a *Aerospike) getTTLHistogram(acc telegraf.Accumulator, hostPort, namespace, set string, n *as.Node, infoPolicy *as.InfoPolicy) error {
stats, err := getHistogram(namespace, set, "ttl", n, infoPolicy)
if err != nil {
return err
}
nTags := createTags(hostPort, n.GetName(), namespace, set)
a.parseHistogram(acc, stats, nTags, "ttl")
return nil
}
func (a *Aerospike) getObjectSizeLinearHistogram(acc telegraf.Accumulator, hostPort, namespace, set string, n *as.Node, infoPolicy *as.InfoPolicy) error {
stats, err := getHistogram(namespace, set, "object-size-linear", n, infoPolicy)
if err != nil {
return err
}
nTags := createTags(hostPort, n.GetName(), namespace, set)
a.parseHistogram(acc, stats, nTags, "object-size-linear")
return nil
}
func getHistogram(namespace, set, histogramType string, n *as.Node, infoPolicy *as.InfoPolicy) (map[string]string, error) {
var queryArg string
if len(set) > 0 {
queryArg = fmt.Sprintf("histogram:type=%s;namespace=%v;set=%v", histogramType, namespace, set)
} else {
queryArg = fmt.Sprintf("histogram:type=%s;namespace=%v", histogramType, namespace)
}
stats, err := n.RequestInfo(infoPolicy, queryArg)
if err != nil {
return nil, err
}
return stats, nil
}
func (a *Aerospike) parseHistogram(acc telegraf.Accumulator, stats, nTags map[string]string, histogramType string) {
nFields := make(map[string]interface{})
for _, stat := range stats {
for _, part := range strings.Split(stat, ":") {
pieces := strings.Split(part, "=")
if len(pieces) < 2 {
continue
}
if pieces[0] == "buckets" {
buckets := strings.Split(pieces[1], ",")
// Squash the buckets if the server returned more than the configured number
numRecordsPerBucket := 1
if len(buckets) > a.NumberHistogramBuckets {
numRecordsPerBucket = int(math.Ceil(float64(len(buckets)) / float64(a.NumberHistogramBuckets)))
}
bucketCount := 0
bucketSum := int64(0) // use int64, as object sums can be large
bucketName := 0
for i, bucket := range buckets {
// Sum records and increment bucket collection counter
if bucketCount < numRecordsPerBucket {
bucketSum = bucketSum + parseAerospikeValue("", bucket).(int64)
bucketCount++
}
// Store records and reset counters
// increment bucket name
if bucketCount == numRecordsPerBucket {
nFields[strconv.Itoa(bucketName)] = bucketSum
bucketCount = 0
bucketSum = 0
bucketName++
} else if i == (len(buckets) - 1) {
// base/edge case where final bucket does not fully
// fill number of records per bucket
nFields[strconv.Itoa(bucketName)] = bucketSum
}
}
}
}
}
acc.AddFields(fmt.Sprintf("aerospike_histogram_%v", strings.ReplaceAll(histogramType, "-", "_")), nFields, nTags, time.Now())
}
func splitNamespaceSet(namespaceSet string) (namespace, set string) {
split := strings.Split(namespaceSet, "/")
return split[0], split[1]
}
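// parseAerospikeValue converts a raw info value, trying int64, uint64,
// bool and float in that order; protected hex fields and unparseable
// values are kept as strings.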
func parseAerospikeValue(key, v string) interface{} {
if protectedHexFields[key] {
return v
} else if parsed, err := strconv.ParseInt(v, 10, 64); err == nil {
return parsed
} else if parsed, err := strconv.ParseUint(v, 10, 64); err == nil {
return parsed
} else if parsed, err := strconv.ParseBool(v); err == nil {
return parsed
} else if parsed, err := strconv.ParseFloat(v, 32); err == nil {
return parsed
}
// leave as string
return v
}
func createTags(hostPort, nodeName, namespace, set string) map[string]string {
nTags := map[string]string{
"aerospike_host": hostPort,
"node_name": nodeName,
"namespace": namespace,
}
if len(set) > 0 {
nTags["set"] = set
}
return nTags
}
func init() {
inputs.Add("aerospike", func() telegraf.Input {
return &Aerospike{}
})
}


@@ -0,0 +1,480 @@
package aerospike
import (
"fmt"
"strconv"
"testing"
as "github.com/aerospike/aerospike-client-go/v5"
"github.com/stretchr/testify/require"
"github.com/testcontainers/testcontainers-go/wait"
"github.com/influxdata/telegraf/testutil"
)
const servicePort = "3000"
func launchTestServer(t *testing.T) *testutil.Container {
container := testutil.Container{
Image: "aerospike:ce-6.0.0.1",
ExposedPorts: []string{servicePort},
WaitingFor: wait.ForLog("migrations: complete"),
}
err := container.Start()
require.NoError(t, err, "failed to start container")
return &container
}
func TestAerospikeStatisticsIntegration(t *testing.T) {
if testing.Short() {
t.Skip("Skipping aerospike integration tests.")
}
container := launchTestServer(t)
defer container.Terminate()
a := &Aerospike{
Servers: []string{fmt.Sprintf("%s:%s", container.Address, container.Ports[servicePort])},
}
var acc testutil.Accumulator
err := acc.GatherError(a.Gather)
require.NoError(t, err)
require.True(t, acc.HasMeasurement("aerospike_node"))
require.True(t, acc.HasTag("aerospike_node", "node_name"))
require.True(t, acc.HasMeasurement("aerospike_namespace"))
require.True(t, acc.HasTag("aerospike_namespace", "node_name"))
require.True(t, acc.HasInt64Field("aerospike_node", "batch_index_error"))
namespaceName := acc.TagValue("aerospike_namespace", "namespace")
require.Equal(t, "test", namespaceName)
}
func TestAerospikeStatisticsPartialErrIntegration(t *testing.T) {
if testing.Short() {
t.Skip("Skipping aerospike integration tests.")
}
container := launchTestServer(t)
defer container.Terminate()
a := &Aerospike{
Servers: []string{
fmt.Sprintf("%s:%s", container.Address, container.Ports[servicePort]),
testutil.GetLocalHost() + ":9999",
},
}
var acc testutil.Accumulator
err := acc.GatherError(a.Gather)
require.Error(t, err)
require.True(t, acc.HasMeasurement("aerospike_node"))
require.True(t, acc.HasMeasurement("aerospike_namespace"))
require.True(t, acc.HasInt64Field("aerospike_node", "batch_index_error"))
namespaceName := acc.TagSetValue("aerospike_namespace", "namespace")
require.Equal(t, "test", namespaceName)
}
func TestSelectNamespacesIntegration(t *testing.T) {
if testing.Short() {
t.Skip("Skipping aerospike integration tests.")
}
container := launchTestServer(t)
defer container.Terminate()
// Select nonexistent namespace
a := &Aerospike{
Servers: []string{fmt.Sprintf("%s:%s", container.Address, container.Ports[servicePort])},
Namespaces: []string{"notTest"},
}
var acc testutil.Accumulator
err := acc.GatherError(a.Gather)
require.NoError(t, err)
require.True(t, acc.HasMeasurement("aerospike_node"))
require.True(t, acc.HasTag("aerospike_node", "node_name"))
require.True(t, acc.HasMeasurement("aerospike_namespace"))
require.True(t, acc.HasTag("aerospike_namespace", "node_name"))
// Expect only 1 namespace
count := 0
for _, p := range acc.Metrics {
if p.Measurement == "aerospike_namespace" {
count++
}
}
require.Equal(t, 1, count)
// expect the namespace to have no fields since it does not exist
require.False(t, acc.HasInt64Field("aerospike_namespace", "appeals_tx_remaining"))
}
func TestDisableQueryNamespacesIntegration(t *testing.T) {
if testing.Short() {
t.Skip("Skipping aerospike integration tests.")
}
container := launchTestServer(t)
defer container.Terminate()
a := &Aerospike{
Servers: []string{
fmt.Sprintf("%s:%s", container.Address, container.Ports[servicePort]),
},
DisableQueryNamespaces: true,
}
var acc testutil.Accumulator
err := acc.GatherError(a.Gather)
require.NoError(t, err)
require.True(t, acc.HasMeasurement("aerospike_node"))
require.False(t, acc.HasMeasurement("aerospike_namespace"))
a.DisableQueryNamespaces = false
err = acc.GatherError(a.Gather)
require.NoError(t, err)
require.True(t, acc.HasMeasurement("aerospike_node"))
require.True(t, acc.HasMeasurement("aerospike_namespace"))
}
func TestQuerySetsIntegration(t *testing.T) {
if testing.Short() {
t.Skip("Skipping aerospike integration tests.")
}
container := launchTestServer(t)
defer container.Terminate()
portInt, err := strconv.Atoi(container.Ports[servicePort])
require.NoError(t, err)
// create a set
// test is the default namespace from aerospike
policy := as.NewClientPolicy()
client, errAs := as.NewClientWithPolicy(policy, container.Address, portInt)
require.NoError(t, errAs)
key, errAs := as.NewKey("test", "foo", 123)
require.NoError(t, errAs)
bins := as.BinMap{
"e": 2,
"pi": 3,
}
errAs = client.Add(nil, key, bins)
require.NoError(t, errAs)
key, errAs = as.NewKey("test", "bar", 1234)
require.NoError(t, errAs)
bins = as.BinMap{
"e": 2,
"pi": 3,
}
errAs = client.Add(nil, key, bins)
require.NoError(t, errAs)
a := &Aerospike{
Servers: []string{
fmt.Sprintf("%s:%s", container.Address, container.Ports[servicePort]),
},
QuerySets: true,
DisableQueryNamespaces: true,
}
var acc testutil.Accumulator
err = acc.GatherError(a.Gather)
require.NoError(t, err)
require.True(t, FindTagValue(&acc, "aerospike_set", "set", "test/foo"))
require.True(t, FindTagValue(&acc, "aerospike_set", "set", "test/bar"))
require.True(t, acc.HasMeasurement("aerospike_set"))
require.True(t, acc.HasTag("aerospike_set", "set"))
require.True(t, acc.HasInt64Field("aerospike_set", "memory_data_bytes"))
}
func TestSelectQuerySetsIntegration(t *testing.T) {
if testing.Short() {
t.Skip("Skipping aerospike integration tests.")
}
container := launchTestServer(t)
defer container.Terminate()
portInt, err := strconv.Atoi(container.Ports[servicePort])
require.NoError(t, err)
// create a set
// test is the default namespace from aerospike
policy := as.NewClientPolicy()
client, errAs := as.NewClientWithPolicy(policy, container.Address, portInt)
require.NoError(t, errAs)
key, errAs := as.NewKey("test", "foo", 123)
require.NoError(t, errAs)
bins := as.BinMap{
"e": 2,
"pi": 3,
}
errAs = client.Add(nil, key, bins)
require.NoError(t, errAs)
key, errAs = as.NewKey("test", "bar", 1234)
require.NoError(t, errAs)
bins = as.BinMap{
"e": 2,
"pi": 3,
}
errAs = client.Add(nil, key, bins)
require.NoError(t, errAs)
a := &Aerospike{
Servers: []string{
fmt.Sprintf("%s:%s", container.Address, container.Ports[servicePort]),
},
QuerySets: true,
Sets: []string{"test/foo"},
DisableQueryNamespaces: true,
}
var acc testutil.Accumulator
err = acc.GatherError(a.Gather)
require.NoError(t, err)
require.True(t, FindTagValue(&acc, "aerospike_set", "set", "test/foo"))
require.False(t, FindTagValue(&acc, "aerospike_set", "set", "test/bar"))
require.True(t, acc.HasMeasurement("aerospike_set"))
require.True(t, acc.HasTag("aerospike_set", "set"))
require.True(t, acc.HasInt64Field("aerospike_set", "memory_data_bytes"))
}
func TestDisableTTLHistogramIntegration(t *testing.T) {
if testing.Short() {
t.Skip("Skipping aerospike integration tests.")
}
container := launchTestServer(t)
defer container.Terminate()
a := &Aerospike{
Servers: []string{
fmt.Sprintf("%s:%s", container.Address, container.Ports[servicePort]),
},
QuerySets: true,
EnableTTLHistogram: false,
}
/*
No measurement exists
*/
var acc testutil.Accumulator
err := acc.GatherError(a.Gather)
require.NoError(t, err)
require.False(t, acc.HasMeasurement("aerospike_histogram_ttl"))
}
func TestDisableObjectSizeLinearHistogramIntegration(t *testing.T) {
if testing.Short() {
t.Skip("Skipping aerospike integration tests.")
}
container := launchTestServer(t)
defer container.Terminate()
a := &Aerospike{
Servers: []string{
fmt.Sprintf("%s:%s", container.Address, container.Ports[servicePort]),
},
QuerySets: true,
EnableObjectSizeLinearHistogram: false,
}
/*
No Measurement
*/
var acc testutil.Accumulator
err := acc.GatherError(a.Gather)
require.NoError(t, err)
require.False(t, acc.HasMeasurement("aerospike_histogram_object_size_linear"))
}
func TestParseNodeInfo(t *testing.T) {
stats := map[string]string{
"statistics": "early_tsvc_from_proxy_error=0;cluster_principal=BB9020012AC4202;cluster_is_member=true",
}
expectedFields := map[string]interface{}{
"early_tsvc_from_proxy_error": int64(0),
"cluster_principal": "BB9020012AC4202",
"cluster_is_member": true,
}
expectedTags := map[string]string{
"aerospike_host": "127.0.0.1:3000",
"node_name": "TestNodeName",
}
var acc testutil.Accumulator
parseNodeInfo(&acc, stats, "127.0.0.1:3000", "TestNodeName")
acc.AssertContainsTaggedFields(t, "aerospike_node", expectedFields, expectedTags)
}
func TestParseNamespaceInfo(t *testing.T) {
stats := map[string]string{
"namespace/test": "ns_cluster_size=1;effective_replication_factor=1;objects=2;tombstones=0;master_objects=2",
}
expectedFields := map[string]interface{}{
"ns_cluster_size": int64(1),
"effective_replication_factor": int64(1),
"tombstones": int64(0),
"objects": int64(2),
"master_objects": int64(2),
}
expectedTags := map[string]string{
"aerospike_host": "127.0.0.1:3000",
"node_name": "TestNodeName",
"namespace": "test",
}
var acc testutil.Accumulator
parseNamespaceInfo(&acc, stats, "127.0.0.1:3000", "test", "TestNodeName")
acc.AssertContainsTaggedFields(t, "aerospike_namespace", expectedFields, expectedTags)
}
func TestParseSetInfo(t *testing.T) {
stats := map[string]string{
"sets/test/foo": "objects=1:tombstones=0:memory_data_bytes=26;",
}
expectedFields := map[string]interface{}{
"objects": int64(1),
"tombstones": int64(0),
"memory_data_bytes": int64(26),
}
expectedTags := map[string]string{
"aerospike_host": "127.0.0.1:3000",
"node_name": "TestNodeName",
"set": "test/foo",
}
var acc testutil.Accumulator
parseSetInfo(&acc, stats, "127.0.0.1:3000", "test/foo", "TestNodeName")
acc.AssertContainsTaggedFields(t, "aerospike_set", expectedFields, expectedTags)
}
func TestParseHistogramSet(t *testing.T) {
a := &Aerospike{
NumberHistogramBuckets: 10,
}
var acc testutil.Accumulator
stats := map[string]string{
"histogram:type=object-size-linear;namespace=test;set=foo": "units=bytes:hist-width=1048576:bucket-width=1024:buckets=0,1,3,1,6,1,9,1,12,1,15,1,18",
}
expectedFields := map[string]interface{}{
"0": int64(1),
"1": int64(4),
"2": int64(7),
"3": int64(10),
"4": int64(13),
"5": int64(16),
"6": int64(18),
}
expectedTags := map[string]string{
"aerospike_host": "127.0.0.1:3000",
"node_name": "TestNodeName",
"namespace": "test",
"set": "foo",
}
nTags := createTags("127.0.0.1:3000", "TestNodeName", "test", "foo")
a.parseHistogram(&acc, stats, nTags, "object-size-linear")
acc.AssertContainsTaggedFields(t, "aerospike_histogram_object_size_linear", expectedFields, expectedTags)
}
func TestParseHistogramNamespace(t *testing.T) {
a := &Aerospike{
NumberHistogramBuckets: 10,
}
var acc testutil.Accumulator
stats := map[string]string{
"histogram:type=object-size-linear;namespace=test;set=foo": " units=bytes:hist-width=1048576:bucket-width=1024:buckets=0,1,3,1,6,1,9,1,12,1,15,1,18",
}
expectedFields := map[string]interface{}{
"0": int64(1),
"1": int64(4),
"2": int64(7),
"3": int64(10),
"4": int64(13),
"5": int64(16),
"6": int64(18),
}
expectedTags := map[string]string{
"aerospike_host": "127.0.0.1:3000",
"node_name": "TestNodeName",
"namespace": "test",
}
nTags := createTags("127.0.0.1:3000", "TestNodeName", "test", "")
a.parseHistogram(&acc, stats, nTags, "object-size-linear")
acc.AssertContainsTaggedFields(t, "aerospike_histogram_object_size_linear", expectedFields, expectedTags)
}
func TestAerospikeParseValue(t *testing.T) {
// uint64 with value bigger than int64 max
val := parseAerospikeValue("", "18446744041841121751")
require.Equal(t, uint64(18446744041841121751), val)
val = parseAerospikeValue("", "true")
v, ok := val.(bool)
require.Truef(t, ok, "bool type expected, got '%T' with '%v' value instead", val, val)
require.True(t, v)
// int values
val = parseAerospikeValue("", "42")
require.Equal(t, int64(42), val, "must be parsed as an int64")
// string values
val = parseAerospikeValue("", "BB977942A2CA502")
require.Equal(t, `BB977942A2CA502`, val, "must be left as a string")
// all digit hex values, unprotected
val = parseAerospikeValue("", "1992929191")
require.Equal(t, int64(1992929191), val, "must be parsed as an int64")
// all digit hex values, protected
val = parseAerospikeValue("node_name", "1992929191")
require.Equal(t, `1992929191`, val, "must be left as a string")
}
func FindTagValue(acc *testutil.Accumulator, measurement, key, value string) bool {
for _, p := range acc.Metrics {
if p.Measurement == measurement {
v, ok := p.Tags[key]
if ok && v == value {
return true
}
}
}
return false
}


@@ -0,0 +1,41 @@
# Read stats from aerospike server(s)
[[inputs.aerospike]]
## Aerospike servers to connect to (with port)
## This plugin will query all namespaces the aerospike
## server has configured and get stats for them.
servers = ["localhost:3000"]
# username = "telegraf"
# password = "pa$$word"
## Optional TLS Config
# enable_tls = false
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
# tls_name = "tlsname"
## If true, skip chain & host verification
# insecure_skip_verify = true
# Feature Options
# Add the 'namespaces' list to limit which namespaces are queried
# Leave blank to query all of them
# disable_query_namespaces = true # default false
# namespaces = ["namespace1", "namespace2"]
# Enable set level telemetry
# query_sets = true # default: false
# Add namespace/set combinations to limit which sets are queried
# Leave blank to query all sets
# sets = ["namespace1/set1", "namespace1/set2", "namespace3"]
# Histograms
# enable_ttl_histogram = true # default: false
# enable_object_size_linear_histogram = true # default: false
# By default, aerospike produces a 100-bucket histogram.
# This is not great for most graphing tools, so this option allows
# squashing the histogram into a smaller number of buckets.
# To get a balanced histogram, the number of buckets chosen
# should divide evenly into 100.
# num_histogram_buckets = 100 # default: 10
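
As a sketch of the squashing arithmetic described in the comments above
(mirroring the normalization in the plugin's `parseHistogram`; the
`squashBuckets` helper is hypothetical): with 100 reported buckets and
`num_histogram_buckets = 10`, each output bucket sums `ceil(100/10) = 10`
consecutive input buckets.

```go
package main

import (
	"fmt"
	"math"
)

// squashBuckets sums consecutive raw histogram buckets so that at most
// 'target' buckets are emitted; a trailing partial group is kept as-is.
func squashBuckets(raw []int64, target int) []int64 {
	perBucket := 1
	if len(raw) > target {
		perBucket = int(math.Ceil(float64(len(raw)) / float64(target)))
	}
	out := make([]int64, 0, target)
	var sum int64
	count := 0
	for i, v := range raw {
		sum += v
		count++
		if count == perBucket || i == len(raw)-1 {
			out = append(out, sum)
			sum, count = 0, 0
		}
	}
	return out
}

func main() {
	raw := make([]int64, 100)
	for i := range raw {
		raw[i] = 1
	}
	// 100 buckets of value 1 squashed into 10 buckets of value 10 each.
	fmt.Println(squashBuckets(raw, 10))
}
```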


@@ -0,0 +1,195 @@
# Alibaba Cloud Monitor Service (Aliyun) Input Plugin
This plugin gathers statistics from the
[Alibaba / Aliyun cloud monitoring service][alibaba]. In the following, we
use `Aliyun` instead of `Alibaba`, as that is the default naming across the
web console and docs.
⭐ Telegraf v1.19.0
🏷️ cloud
💻 all
[alibaba]: https://www.alibabacloud.com
## Aliyun Authentication
This plugin uses an [AccessKey][1] credential for authentication with the
Aliyun OpenAPI endpoint. The plugin attempts to authenticate in the
following order:
1. Ram RoleARN credential if `access_key_id`, `access_key_secret`, `role_arn`,
   `role_session_name` are specified
2. AccessKey STS token credential if `access_key_id`, `access_key_secret`,
   `access_key_sts_token` are specified
3. AccessKey credential if `access_key_id`, `access_key_secret` are specified
4. Ecs Ram Role credential if `role_name` is specified
5. RSA keypair credential if `private_key`, `public_key_id` are specified
6. Environment variables credential
7. Instance metadata credential
[1]: https://www.alibabacloud.com/help/doc-detail/53045.htm?spm=a2c63.p38356.b99.127.5cba21fdt5MJKr&parentId=28572
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used
to modify metrics, tags, and fields, or to create aliases and configure
ordering, etc.
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
## Configuration
```toml @sample.conf
# Pull Metric Statistics from Aliyun CMS
[[inputs.aliyuncms]]
## Aliyun Credentials
## Credentials are loaded in the following order
## 1) Ram RoleArn credential
## 2) AccessKey STS token credential
## 3) AccessKey credential
## 4) Ecs Ram Role credential
## 5) RSA keypair credential
## 6) Environment variables credential
## 7) Instance metadata credential
# access_key_id = ""
# access_key_secret = ""
# access_key_sts_token = ""
# role_arn = ""
# role_session_name = ""
# private_key = ""
# public_key_id = ""
# role_name = ""
  ## Specify the Aliyun regions to be queried for metric and object discovery.
  ## If not set, all supported regions (see below) will be covered. This can
  ## put a significant load on the API, so the recommendation is to limit
  ## the list as much as possible.
## Allowed values: https://www.alibabacloud.com/help/zh/doc-detail/40654.htm
## Default supported regions are:
## cn-qingdao,cn-beijing,cn-zhangjiakou,cn-huhehaote,cn-hangzhou,
## cn-shanghai, cn-shenzhen, cn-heyuan,cn-chengdu,cn-hongkong,
## ap-southeast-1,ap-southeast-2,ap-southeast-3,ap-southeast-5,
## ap-south-1,ap-northeast-1, us-west-1,us-east-1,eu-central-1,
## eu-west-1,me-east-1
##
  ## From a discovery perspective this sets the scope for object discovery;
  ## the discovered info can be used to enrich the metrics with object
  ## attributes/tags. Discovery is not supported for all projects.
  ## Currently, discovery is supported for the following projects:
## - acs_ecs_dashboard
## - acs_rds_dashboard
## - acs_slb_dashboard
## - acs_vpc_eip
regions = ["cn-hongkong"]
## Requested AliyunCMS aggregation Period (required)
  ## The period must be a multiple of 60s and the minimum for AliyunCMS
  ## metrics is 1 minute (60s). However, not all metrics are available at
  ## the one-minute period. Some are collected at 3-minute, 5-minute, or
  ## larger intervals.
## See: https://help.aliyun.com/document_detail/51936.html?spm=a2c4g.11186623.2.18.2bc1750eeOw1Pv
## Note that if a period is configured that is smaller than the minimum for
## a particular metric, that metric will not be returned by Aliyun's
## OpenAPI and will not be collected by Telegraf.
period = "5m"
## Collection Delay (required)
## The delay must account for metrics availability via AliyunCMS API.
delay = "1m"
## Recommended: use metric 'interval' that is a multiple of 'period'
## to avoid gaps or overlap in pulled data
interval = "5m"
## Metric Statistic Project (required)
project = "acs_slb_dashboard"
## Maximum requests per second, default value is 200
ratelimit = 200
  ## How often the discovery API call is executed (default 1m)
#discovery_interval = "1m"
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Metrics to Pull
## At least one metrics definition required
[[inputs.aliyuncms.metrics]]
    ## Metric names to be requested.
    ## Descriptions can be found here (per project):
## https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq
names = ["InstanceActiveConnection", "InstanceNewConnection"]
## Dimension filters for Metric (optional)
    ## This allows getting additional metric dimensions. If a dimension is not
    ## specified, it can be returned or the data can be aggregated; it depends
    ## on the particular metric. You can find details here:
## https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq
##
    ## Note that by default the dimension filter includes the list of
    ## discovered objects in scope (if discovery is enabled). Values specified
    ## here will be added to the list of discovered objects. You can specify
    ## either a single dimension:
# dimensions = '{"instanceId": "p-example"}'
## Or you can specify several dimensions at once:
# dimensions = '[{"instanceId": "p-example"},{"instanceId": "q-example"}]'
## Tag Query Path
    ## The following tags are added by default:
## * regionId (if discovery enabled)
## * userId
## * instanceId
    ## Enrichment tags can be added from discovery (if supported).
    ## The notation is
## <measurement_tag_name>:<JMES query path (https://jmespath.org/tutorial.html)>
## To figure out which fields are available, consult the
## Describe<ObjectType> API per project. For example, for SLB see:
## https://api.aliyun.com/#/?product=Slb&version=2014-05-15&api=DescribeLoadBalancers&params={}&tab=MOCK&lang=GO
# tag_query_path = [
# "address:Address",
# "name:LoadBalancerName",
# "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]"
# ]
## Allow metrics without discovery data, if discovery is enabled.
    ## If set to true, metrics without discovery data will be emitted;
    ## otherwise they are dropped. This can be of help when debugging dimension
    ## filters, or when discovery only partially covers the monitoring scope.
# allow_dps_without_discovery = false
```
### Requirements and Terminology
The plugin configuration utilizes [preset metric item references][2]:
- `discovery_region` must be a valid Aliyun
[Region](https://www.alibabacloud.com/help/doc-detail/40654.htm) value
- `period` must be a valid duration value
- `project` must be a preset project value
- `names` must be preset metric names
- `dimensions` must be preset dimension values
[2]: https://www.alibabacloud.com/help/doc-detail/28619.htm?spm=a2c63.p38356.a3.2.389f233d0kPJn0
## Metrics
Each monitored Aliyun CMS project records a measurement with fields for each
available metric statistic. Project and metric names are represented in
[snake case](https://en.wikipedia.org/wiki/Snake_case).
- aliyuncms_{project}
- {metric}_average (metric Average value)
- {metric}_minimum (metric Minimum value)
- {metric}_maximum (metric Maximum value)
- {metric}_value (metric Value value)
## Example Output
```text
aliyuncms_acs_slb_dashboard,instanceId=p-example,regionId=cn-hangzhou,userId=1234567890 latency_average=0.004810798017284538,latency_maximum=0.1100282669067383,latency_minimum=0.0006084442138671875
```


@@ -0,0 +1,496 @@
//go:generate ../../../tools/readme_config_includer/generator
package aliyuncms
import (
_ "embed"
"encoding/json"
"errors"
"fmt"
"strconv"
"strings"
"sync"
"time"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/providers"
"github.com/aliyun/alibaba-cloud-sdk-go/services/cms"
"github.com/jmespath/go-jmespath"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/limiter"
"github.com/influxdata/telegraf/plugins/inputs"
)
//go:embed sample.conf
var sampleConfig string
type (
AliyunCMS struct {
AccessKeyID string `toml:"access_key_id"`
AccessKeySecret string `toml:"access_key_secret"`
AccessKeyStsToken string `toml:"access_key_sts_token"`
RoleArn string `toml:"role_arn"`
RoleSessionName string `toml:"role_session_name"`
PrivateKey string `toml:"private_key"`
PublicKeyID string `toml:"public_key_id"`
RoleName string `toml:"role_name"`
Regions []string `toml:"regions"`
DiscoveryInterval config.Duration `toml:"discovery_interval"`
Period config.Duration `toml:"period"`
Delay config.Duration `toml:"delay"`
Project string `toml:"project"`
Metrics []*metric `toml:"metrics"`
RateLimit int `toml:"ratelimit"`
Log telegraf.Logger `toml:"-"`
client aliyuncmsClient
windowStart time.Time
windowEnd time.Time
dt *discoveryTool
dimensionKey string
discoveryData map[string]interface{}
measurement string
}
// metric describes what metrics to get
metric struct {
ObjectsFilter string `toml:"objects_filter"`
MetricNames []string `toml:"names"`
Dimensions string `toml:"dimensions"` // String representation of JSON dimensions
TagsQueryPath []string `toml:"tag_query_path"`
AllowDataPointWODiscoveryData bool `toml:"allow_dps_without_discovery"` // Allow data points without discovery data (if no discovery data found)
dtLock sync.Mutex // Guard for discoveryTags & dimensions
discoveryTags map[string]map[string]string // Internal data structure that can enrich metrics with tags
dimensionsUdObj map[string]string
dimensionsUdArr []map[string]string // Parsed Dimensions JSON string (unmarshalled)
requestDimensions []map[string]string // this is the actual dimensions list that would be used in API request
requestDimensionsStr string // String representation of the above
}
aliyuncmsClient interface {
DescribeMetricList(request *cms.DescribeMetricListRequest) (response *cms.DescribeMetricListResponse, err error)
}
)
// https://www.alibabacloud.com/help/doc-detail/40654.htm?gclid=Cj0KCQjw4dr0BRCxARIsAKUNjWTAMfyVUn_Y3OevFBV3CMaazrhq0URHsgE7c0m0SeMQRKlhlsJGgIEaAviyEALw_wcB
var aliyunRegionList = []string{
"cn-qingdao",
"cn-beijing",
"cn-zhangjiakou",
"cn-huhehaote",
"cn-hangzhou",
"cn-shanghai",
"cn-shenzhen",
"cn-heyuan",
"cn-chengdu",
"cn-hongkong",
"ap-southeast-1",
"ap-southeast-2",
"ap-southeast-3",
"ap-southeast-5",
"ap-south-1",
"ap-northeast-1",
"us-west-1",
"us-east-1",
"eu-central-1",
"eu-west-1",
"me-east-1",
}
func (*AliyunCMS) SampleConfig() string {
return sampleConfig
}
func (s *AliyunCMS) Init() error {
if s.Project == "" {
return errors.New("project is not set")
}
var (
roleSessionExpiration = 600
sessionExpiration = 600
)
configuration := &providers.Configuration{
AccessKeyID: s.AccessKeyID,
AccessKeySecret: s.AccessKeySecret,
AccessKeyStsToken: s.AccessKeyStsToken,
RoleArn: s.RoleArn,
RoleSessionName: s.RoleSessionName,
RoleSessionExpiration: &roleSessionExpiration,
PrivateKey: s.PrivateKey,
PublicKeyID: s.PublicKeyID,
SessionExpiration: &sessionExpiration,
RoleName: s.RoleName,
}
credentialProviders := []providers.Provider{
providers.NewConfigurationCredentialProvider(configuration),
providers.NewEnvCredentialProvider(),
providers.NewInstanceMetadataProvider(),
}
credential, err := providers.NewChainProvider(credentialProviders).Retrieve()
if err != nil {
return fmt.Errorf("failed to retrieve credential: %w", err)
}
s.client, err = cms.NewClientWithOptions("", sdk.NewConfig(), credential)
if err != nil {
return fmt.Errorf("failed to create cms client: %w", err)
}
// check metrics dimensions consistency
for i := range s.Metrics {
metric := s.Metrics[i]
if metric.Dimensions == "" {
continue
}
metric.dimensionsUdObj = make(map[string]string)
metric.dimensionsUdArr = make([]map[string]string, 0)
// first try to unmarshal as an object
if err := json.Unmarshal([]byte(metric.Dimensions), &metric.dimensionsUdObj); err == nil {
// We were successful, so stop here
continue
}
// then try to unmarshal as an array
if err := json.Unmarshal([]byte(metric.Dimensions), &metric.dimensionsUdArr); err != nil {
return fmt.Errorf("cannot parse dimensions (neither obj, nor array) %q: %w", metric.Dimensions, err)
}
}
s.measurement = formatMeasurement(s.Project)
// Check regions
if len(s.Regions) == 0 {
s.Regions = aliyunRegionList
s.Log.Infof("'regions' is not set. Metrics will be queried across %d regions:\n%s",
len(s.Regions), strings.Join(s.Regions, ","))
}
// Init discovery...
if s.dt == nil { // Support for tests
s.dt, err = newDiscoveryTool(s.Regions, s.Project, s.Log, credential, int(float32(s.RateLimit)*0.2), time.Duration(s.DiscoveryInterval))
if err != nil {
s.Log.Errorf("Discovery tool is not activated: %v", err)
s.dt = nil
return nil
}
}
s.discoveryData, err = s.dt.getDiscoveryDataAcrossRegions(nil)
if err != nil {
s.Log.Errorf("Discovery tool is not activated: %v", err)
s.dt = nil
return nil
}
s.Log.Infof("%d object(s) discovered...", len(s.discoveryData))
// Special setting for acs_oss project since the API differs
if s.Project == "acs_oss" {
s.dimensionKey = "BucketName"
}
return nil
}
// Start starts the periodic discovery loop; metrics are gathered through Gather
func (s *AliyunCMS) Start(telegraf.Accumulator) error {
// Start periodic discovery process
if s.dt != nil {
s.dt.start()
}
return nil
}
func (s *AliyunCMS) Gather(acc telegraf.Accumulator) error {
s.updateWindow(time.Now())
// limit concurrency or we can easily exhaust user connection limit
lmtr := limiter.NewRateLimiter(s.RateLimit, time.Second)
defer lmtr.Stop()
var wg sync.WaitGroup
for _, m := range s.Metrics {
// Prepare internal structure with data from discovery
s.prepareTagsAndDimensions(m)
wg.Add(len(m.MetricNames))
for _, metricName := range m.MetricNames {
<-lmtr.C
go func(metricName string, m *metric) {
defer wg.Done()
acc.AddError(s.gatherMetric(acc, metricName, m))
}(metricName, m)
}
wg.Wait()
}
return nil
}
// Stop - stops the plugin discovery loop
func (s *AliyunCMS) Stop() {
if s.dt != nil {
s.dt.stop()
}
}
func (s *AliyunCMS) updateWindow(relativeTo time.Time) {
// https://help.aliyun.com/document_detail/51936.html?spm=a2c4g.11186623.6.701.54025679zh6wiR
// The start and end times are executed in the mode of
// opening left and closing right, and startTime cannot be equal
// to or greater than endTime.
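// Example: with delay=1m and period=5m, the first run at 12:06:00
// queries the window [12:00:00, 12:05:00); subsequent runs start at
// the previous window's end.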
windowEnd := relativeTo.Add(-time.Duration(s.Delay))
if s.windowEnd.IsZero() {
// this is the first run, no window info, so just get a single period
s.windowStart = windowEnd.Add(-time.Duration(s.Period))
} else {
// subsequent window, start where last window left off
s.windowStart = s.windowEnd
}
s.windowEnd = windowEnd
}
// gatherMetric queries the given metric for all configured regions and
// emits the resulting data points
func (s *AliyunCMS) gatherMetric(acc telegraf.Accumulator, metricName string, metric *metric) error {
for _, region := range s.Regions {
req := cms.CreateDescribeMetricListRequest()
req.Period = strconv.FormatInt(int64(time.Duration(s.Period).Seconds()), 10)
req.MetricName = metricName
req.Length = "10000"
req.Namespace = s.Project
req.EndTime = strconv.FormatInt(s.windowEnd.Unix()*1000, 10)
req.StartTime = strconv.FormatInt(s.windowStart.Unix()*1000, 10)
req.Dimensions = metric.requestDimensionsStr
req.RegionId = region
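// Page through the results using NextToken until the API stops
// returning one.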
for more := true; more; {
resp, err := s.client.DescribeMetricList(req)
if err != nil {
return fmt.Errorf("failed to query metricName list: %w", err)
}
if resp.Code != "200" {
s.Log.Errorf("failed to query metricName list: %v", resp.Message)
break
}
var datapoints []map[string]interface{}
if err := json.Unmarshal([]byte(resp.Datapoints), &datapoints); err != nil {
return fmt.Errorf("failed to decode response datapoints: %w", err)
}
if len(datapoints) == 0 {
s.Log.Debugf("No metrics returned from CMS, response msg: %s", resp.Message)
break
}
NextDataPoint:
for _, datapoint := range datapoints {
fields := make(map[string]interface{}, len(datapoint))
tags := make(map[string]string, len(datapoint))
datapointTime := int64(0)
for key, value := range datapoint {
switch key {
case "instanceId", "BucketName":
tags[key] = value.(string)
if metric.discoveryTags != nil { // discovery can be not activated
// Skipping data point if discovery data not exist
_, ok := metric.discoveryTags[value.(string)]
if !ok &&
!metric.AllowDataPointWODiscoveryData {
s.Log.Warnf("Instance %q is not found in discovery, skipping monitoring datapoint...", value.(string))
continue NextDataPoint
}
for k, v := range metric.discoveryTags[value.(string)] {
tags[k] = v
}
}
case "userId":
tags[key] = value.(string)
case "timestamp":
datapointTime = int64(value.(float64)) / 1000
default:
fields[formatField(metricName, key)] = value
}
}
acc.AddFields(s.measurement, fields, tags, time.Unix(datapointTime, 0))
}
req.NextToken = resp.NextToken
more = req.NextToken != ""
}
}
return nil
}
// parseTag parses a tag spec of the form "tagKey:queryPath" (a bare query path
// is also accepted, in which case it doubles as the tag key) and resolves the
// tag value from the discovery data via a JMESPath query.
func parseTag(tagSpec string, data interface{}) (tagKey, tagValue string, err error) {
var (
ok bool
queryPath = tagSpec
)
tagKey = tagSpec
// Split query path to tagKey and query path
if splitted := strings.Split(tagSpec, ":"); len(splitted) == 2 {
tagKey = splitted[0]
queryPath = splitted[1]
}
tagRawValue, err := jmespath.Search(queryPath, data)
if err != nil {
return "", "", fmt.Errorf("can't query data from discovery data using query path %q: %w", queryPath, err)
}
if tagRawValue == nil { // Nothing found
return "", "", nil
}
tagValue, ok = tagRawValue.(string)
if !ok {
return "", "", fmt.Errorf("tag value %q parsed by query %q is not a string value", tagRawValue, queryPath)
}
return tagKey, tagValue, nil
}
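// Example (illustrative values): with discovery data
//   {"LoadBalancerName": "lb1", ...}
// the spec "name:LoadBalancerName" yields tagKey="name", tagValue="lb1",
// while the bare spec "LoadBalancerName" uses the query path itself as the tag key.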
func (s *AliyunCMS) prepareTagsAndDimensions(metric *metric) {
var (
newData bool
defaultTags = []string{"RegionId:RegionId"}
)
if s.dt == nil { // Discovery is not activated
return
}
// Reading all data from buffered channel
L:
for {
select {
case s.discoveryData = <-s.dt.dataChan:
newData = true
continue
default:
break L
}
}
// New data has arrived (so process it), or this is the first call
if newData || len(metric.discoveryTags) == 0 {
metric.dtLock.Lock()
defer metric.dtLock.Unlock()
if metric.discoveryTags == nil {
metric.discoveryTags = make(map[string]map[string]string, len(s.discoveryData))
}
metric.requestDimensions = nil // erasing
metric.requestDimensions = make([]map[string]string, 0, len(s.discoveryData))
// Preparing tags & dims...
for instanceID, elem := range s.discoveryData {
// Start filling tags
// Remove old value if it exists
delete(metric.discoveryTags, instanceID)
metric.discoveryTags[instanceID] = make(map[string]string, len(metric.TagsQueryPath)+len(defaultTags))
for _, tagQueryPath := range metric.TagsQueryPath {
tagKey, tagValue, err := parseTag(tagQueryPath, elem)
if err != nil {
s.Log.Errorf("%v", err)
continue
}
if err == nil && tagValue == "" { // Nothing found
s.Log.Debugf("Data by query path %q: is not found, for instance %q", tagQueryPath, instanceID)
continue
}
metric.discoveryTags[instanceID][tagKey] = tagValue
}
// Adding default tags if not already there
for _, defaultTagQP := range defaultTags {
tagKey, tagValue, err := parseTag(defaultTagQP, elem)
if err != nil {
s.Log.Errorf("%v", err)
continue
}
if err == nil && tagValue == "" { // Nothing found
s.Log.Debugf("Data by query path %q: is not found, for instance %q",
defaultTagQP, instanceID)
continue
}
metric.discoveryTags[instanceID][tagKey] = tagValue
}
// if no dimension configured in config file, use discovery data
if len(metric.dimensionsUdArr) == 0 && len(metric.dimensionsUdObj) == 0 {
metric.requestDimensions = append(
metric.requestDimensions,
map[string]string{s.dimensionKey: instanceID})
}
}
// add dimensions filter from config file
if len(metric.dimensionsUdArr) != 0 {
metric.requestDimensions = append(metric.requestDimensions, metric.dimensionsUdArr...)
}
if len(metric.dimensionsUdObj) != 0 {
metric.requestDimensions = append(metric.requestDimensions, metric.dimensionsUdObj)
}
// Marshalling dimensions to a JSON string
reqDim, err := json.Marshal(metric.requestDimensions)
if err != nil {
s.Log.Errorf("Can't marshal metric request dimensions %v :%v",
metric.requestDimensions, err)
metric.requestDimensionsStr = ""
} else {
metric.requestDimensionsStr = string(reqDim)
}
}
}
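// For illustration (instance IDs are made up): with discovery enabled and no
// user-defined dimensions, two discovered SLB instances end up serialized as
//   [{"instanceId":"i-abc"},{"instanceId":"i-def"}]
// which is the JSON filter string stored in requestDimensionsStr.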
// Formatting helpers
func formatField(metricName, statistic string) string {
if metricName == statistic {
statistic = "value"
}
return fmt.Sprintf("%s_%s", snakeCase(metricName), snakeCase(statistic))
}
func formatMeasurement(project string) string {
project = strings.ReplaceAll(project, "/", "_")
project = snakeCase(project)
return "aliyuncms_" + project
}
func snakeCase(s string) string {
s = internal.SnakeCase(s)
s = strings.ReplaceAll(s, "__", "_")
return s
}
func init() {
inputs.Add("aliyuncms", func() telegraf.Input {
return &AliyunCMS{
RateLimit: 200,
DiscoveryInterval: config.Duration(time.Minute),
dimensionKey: "instanceId",
}
})
}


@ -0,0 +1,514 @@
package aliyuncms
import (
"bytes"
"errors"
"fmt"
"io"
"net/http"
"testing"
"time"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/providers"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
"github.com/aliyun/alibaba-cloud-sdk-go/services/cms"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/testutil"
)
const inputTitle = "inputs.aliyuncms"
type mockGatherAliyunCMSClient struct{}
func (*mockGatherAliyunCMSClient) DescribeMetricList(request *cms.DescribeMetricListRequest) (*cms.DescribeMetricListResponse, error) {
resp := new(cms.DescribeMetricListResponse)
switch request.MetricName {
case "InstanceActiveConnection":
resp.Code = "200"
resp.Period = "60"
resp.Datapoints = `
[{
"timestamp": 1490152860000,
"Maximum": 200,
"userId": "1234567898765432",
"Minimum": 100,
"instanceId": "i-abcdefgh123456",
"Average": 150,
"Value": 300
}]`
case "ErrorCode":
resp.Code = "404"
resp.Message = "ErrorCode"
case "ErrorDatapoint":
resp.Code = "200"
resp.Period = "60"
resp.Datapoints = `
[{
"timestamp": 1490152860000,
"Maximum": 200,
"userId": "1234567898765432",
"Minimum": 100,
"instanceId": "i-abcdefgh123456",
"Average": 150,
}]`
case "EmptyDatapoint":
resp.Code = "200"
resp.Period = "60"
resp.Datapoints = `[]`
case "ErrorResp":
return nil, errors.New("error response")
}
return resp, nil
}
type mockAliyunSDKCli struct {
resp *responses.CommonResponse
}
func (m *mockAliyunSDKCli) ProcessCommonRequest(_ *requests.CommonRequest) (response *responses.CommonResponse, err error) {
return m.resp, nil
}
func getDiscoveryTool(project string, discoverRegions []string) (*discoveryTool, error) {
var (
err error
credential auth.Credential
)
configuration := &providers.Configuration{
AccessKeyID: "dummyKey",
AccessKeySecret: "dummySecret",
}
credentialProviders := []providers.Provider{
providers.NewConfigurationCredentialProvider(configuration),
providers.NewEnvCredentialProvider(),
providers.NewInstanceMetadataProvider(),
}
credential, err = providers.NewChainProvider(credentialProviders).Retrieve()
if err != nil {
return nil, fmt.Errorf("failed to retrieve credential: %w", err)
}
dt, err := newDiscoveryTool(discoverRegions, project, testutil.Logger{Name: inputTitle}, credential, 1, time.Minute*2)
if err != nil {
return nil, fmt.Errorf("can't create discovery tool object: %w", err)
}
return dt, nil
}
func getMockSdkCli(httpResp *http.Response) (mockAliyunSDKCli, error) {
resp := responses.NewCommonResponse()
if err := responses.Unmarshal(resp, httpResp, "JSON"); err != nil {
return mockAliyunSDKCli{}, fmt.Errorf("can't parse response: %w", err)
}
return mockAliyunSDKCli{resp: resp}, nil
}
func TestPluginDefaults(t *testing.T) {
require.Equal(t, &AliyunCMS{RateLimit: 200,
DiscoveryInterval: config.Duration(time.Minute),
dimensionKey: "instanceId",
}, inputs.Inputs["aliyuncms"]())
}
func TestPluginInitialize(t *testing.T) {
var err error
plugin := new(AliyunCMS)
plugin.Log = testutil.Logger{Name: inputTitle}
plugin.Regions = []string{"cn-shanghai"}
plugin.dt, err = getDiscoveryTool("acs_slb_dashboard", plugin.Regions)
if err != nil {
t.Fatalf("Can't create discovery tool object: %v", err)
}
httpResp := &http.Response{
StatusCode: 200,
Body: io.NopCloser(bytes.NewBufferString(
`{
"LoadBalancers":
{
"LoadBalancer": [
{"LoadBalancerId":"bla"}
]
},
"TotalCount": 1,
"PageSize": 1,
"PageNumber": 1
}`)),
}
mockCli, err := getMockSdkCli(httpResp)
if err != nil {
t.Fatalf("Can't create mock sdk cli: %v", err)
}
plugin.dt.cli = map[string]aliyunSdkClient{plugin.Regions[0]: &mockCli}
tests := []struct {
name string
project string
accessKeyID string
accessKeySecret string
expectedErrorString string
regions []string
discoveryRegions []string
}{
{
name: "Empty project",
expectedErrorString: "project is not set",
regions: []string{"cn-shanghai"},
},
{
name: "Valid project",
project: "acs_slb_dashboard",
regions: []string{"cn-shanghai"},
accessKeyID: "dummy",
accessKeySecret: "dummy",
},
{
name: "'regions' is not set",
project: "acs_slb_dashboard",
accessKeyID: "dummy",
accessKeySecret: "dummy",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
plugin.Project = tt.project
plugin.AccessKeyID = tt.accessKeyID
plugin.AccessKeySecret = tt.accessKeySecret
plugin.Regions = tt.regions
if tt.expectedErrorString != "" {
require.EqualError(t, plugin.Init(), tt.expectedErrorString)
} else {
require.NoError(t, plugin.Init())
}
if len(tt.regions) == 0 { // Check if set to default
require.Equal(t, plugin.Regions, aliyunRegionList)
}
})
}
}
func TestPluginMetricsInitialize(t *testing.T) {
var err error
plugin := new(AliyunCMS)
plugin.Log = testutil.Logger{Name: inputTitle}
plugin.Regions = []string{"cn-shanghai"}
plugin.dt, err = getDiscoveryTool("acs_slb_dashboard", plugin.Regions)
if err != nil {
t.Fatalf("Can't create discovery tool object: %v", err)
}
httpResp := &http.Response{
StatusCode: 200,
Body: io.NopCloser(bytes.NewBufferString(
`{
"LoadBalancers":
{
"LoadBalancer": [
{"LoadBalancerId":"bla"}
]
},
"TotalCount": 1,
"PageSize": 1,
"PageNumber": 1
}`)),
}
mockCli, err := getMockSdkCli(httpResp)
if err != nil {
t.Fatalf("Can't create mock sdk cli: %v", err)
}
plugin.dt.cli = map[string]aliyunSdkClient{plugin.Regions[0]: &mockCli}
tests := []struct {
name string
project string
accessKeyID string
accessKeySecret string
expectedErrorString string
regions []string
discoveryRegions []string
metrics []*metric
}{
{
name: "Valid project",
project: "acs_slb_dashboard",
regions: []string{"cn-shanghai"},
accessKeyID: "dummy",
accessKeySecret: "dummy",
metrics: []*metric{
{
Dimensions: `{"instanceId": "i-abcdefgh123456"}`,
},
},
},
{
name: "Valid project",
project: "acs_slb_dashboard",
regions: []string{"cn-shanghai"},
accessKeyID: "dummy",
accessKeySecret: "dummy",
metrics: []*metric{
{
Dimensions: `[{"instanceId": "p-example"},{"instanceId": "q-example"}]`,
},
},
},
{
name: "Valid project",
project: "acs_slb_dashboard",
regions: []string{"cn-shanghai"},
accessKeyID: "dummy",
accessKeySecret: "dummy",
expectedErrorString: `cannot parse dimensions (neither obj, nor array) "[": unexpected end of JSON input`,
metrics: []*metric{
{
Dimensions: `[`,
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
plugin.Project = tt.project
plugin.AccessKeyID = tt.accessKeyID
plugin.AccessKeySecret = tt.accessKeySecret
plugin.Regions = tt.regions
plugin.Metrics = tt.metrics
if tt.expectedErrorString != "" {
require.EqualError(t, plugin.Init(), tt.expectedErrorString)
} else {
require.NoError(t, plugin.Init())
}
})
}
}
func TestUpdateWindow(t *testing.T) {
duration, err := time.ParseDuration("1m")
require.NoError(t, err)
internalDuration := config.Duration(duration)
plugin := &AliyunCMS{
Project: "acs_slb_dashboard",
Period: internalDuration,
Delay: internalDuration,
Log: testutil.Logger{Name: inputTitle},
}
now := time.Now()
require.True(t, plugin.windowEnd.IsZero())
require.True(t, plugin.windowStart.IsZero())
plugin.updateWindow(now)
newStartTime := plugin.windowEnd
// initial window just has a single period
require.EqualValues(t, plugin.windowEnd, now.Add(-time.Duration(plugin.Delay)))
require.EqualValues(t, plugin.windowStart, now.Add(-time.Duration(plugin.Delay)).Add(-time.Duration(plugin.Period)))
now = time.Now()
plugin.updateWindow(now)
// subsequent window uses previous end time as start time
require.EqualValues(t, plugin.windowEnd, now.Add(-time.Duration(plugin.Delay)))
require.EqualValues(t, plugin.windowStart, newStartTime)
}
func TestGatherMetric(t *testing.T) {
plugin := &AliyunCMS{
Project: "acs_slb_dashboard",
client: new(mockGatherAliyunCMSClient),
measurement: formatMeasurement("acs_slb_dashboard"),
Log: testutil.Logger{Name: inputTitle},
Regions: []string{"cn-shanghai"},
}
metric := &metric{
Dimensions: `"instanceId": "i-abcdefgh123456"`,
}
tests := []struct {
name string
metricName string
expectedErrorString string
}{
{
name: "Datapoint with corrupted JSON",
metricName: "ErrorDatapoint",
expectedErrorString: `failed to decode response datapoints: invalid character '}' looking for beginning of object key string`,
},
{
name: "General CMS response error",
metricName: "ErrorResp",
expectedErrorString: "failed to query metricName list: error response",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var acc telegraf.Accumulator
require.EqualError(t, plugin.gatherMetric(acc, tt.metricName, metric), tt.expectedErrorString)
})
}
}
func TestGather(t *testing.T) {
m := &metric{
Dimensions: `{"instanceId": "i-abcdefgh123456"}`,
}
plugin := &AliyunCMS{
AccessKeyID: "my_access_key_id",
AccessKeySecret: "my_access_key_secret",
Project: "acs_slb_dashboard",
Metrics: []*metric{m},
RateLimit: 200,
measurement: formatMeasurement("acs_slb_dashboard"),
Regions: []string{"cn-shanghai"},
client: new(mockGatherAliyunCMSClient),
Log: testutil.Logger{Name: inputTitle},
}
// test table:
tests := []struct {
name string
hasMeasurement bool
metricNames []string
expected []telegraf.Metric
}{
{
name: "Empty data point",
metricNames: []string{"EmptyDatapoint"},
expected: []telegraf.Metric{
testutil.MustMetric(
"aliyuncms_acs_slb_dashboard",
nil,
nil,
time.Time{}),
},
},
{
name: "Data point with fields & tags",
hasMeasurement: true,
metricNames: []string{"InstanceActiveConnection"},
expected: []telegraf.Metric{
testutil.MustMetric(
"aliyuncms_acs_slb_dashboard",
map[string]string{
"instanceId": "i-abcdefgh123456",
"userId": "1234567898765432",
},
map[string]interface{}{
"instance_active_connection_minimum": float64(100),
"instance_active_connection_maximum": float64(200),
"instance_active_connection_average": float64(150),
"instance_active_connection_value": float64(300),
},
time.Unix(1490152860, 0)), // the response timestamp is in ms; the plugin divides by 1000
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var acc testutil.Accumulator
plugin.Metrics[0].MetricNames = tt.metricNames
require.NoError(t, acc.GatherError(plugin.Gather))
require.Equal(t, acc.HasMeasurement("aliyuncms_acs_slb_dashboard"), tt.hasMeasurement)
if tt.hasMeasurement {
acc.AssertContainsTaggedFields(t, "aliyuncms_acs_slb_dashboard", tt.expected[0].Fields(), tt.expected[0].Tags())
}
})
}
}
func TestGetDiscoveryDataAcrossRegions(t *testing.T) {
// test table:
tests := []struct {
name string
project string
region string
httpResp *http.Response
discData map[string]interface{}
totalCount int
pageSize int
pageNumber int
expectedErrorString string
}{
{
name: "No root key in discovery response",
project: "acs_slb_dashboard",
region: "cn-hongkong",
httpResp: &http.Response{
StatusCode: 200,
Body: io.NopCloser(bytes.NewBufferString(`{}`)),
},
totalCount: 0,
pageSize: 0,
pageNumber: 0,
expectedErrorString: `didn't find root key "LoadBalancers" in discovery response`,
},
{
name: "1 object discovered",
project: "acs_slb_dashboard",
region: "cn-hongkong",
httpResp: &http.Response{
StatusCode: 200,
Body: io.NopCloser(bytes.NewBufferString(
`{
"LoadBalancers":
{
"LoadBalancer": [
{"LoadBalancerId":"bla"}
]
},
"TotalCount": 1,
"PageSize": 1,
"PageNumber": 1
}`)),
},
discData: map[string]interface{}{"bla": map[string]interface{}{"LoadBalancerId": "bla"}},
totalCount: 1,
pageSize: 1,
pageNumber: 1,
expectedErrorString: "",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
dt, err := getDiscoveryTool(tt.project, []string{tt.region})
if err != nil {
t.Fatalf("Can't create discovery tool object: %v", err)
}
mockCli, err := getMockSdkCli(tt.httpResp)
if err != nil {
t.Fatalf("Can't create mock sdk cli: %v", err)
}
dt.cli = map[string]aliyunSdkClient{tt.region: &mockCli}
data, err := dt.getDiscoveryDataAcrossRegions(nil)
require.Equal(t, tt.discData, data)
if err != nil {
require.EqualError(t, err, tt.expectedErrorString)
}
})
}
}


@ -0,0 +1,458 @@
package aliyuncms
import (
"encoding/json"
"errors"
"fmt"
"reflect"
"strconv"
"strings"
"sync"
"time"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
"github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
"github.com/aliyun/alibaba-cloud-sdk-go/services/rds"
"github.com/aliyun/alibaba-cloud-sdk-go/services/slb"
"github.com/aliyun/alibaba-cloud-sdk-go/services/vpc"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal/limiter"
)
// discoveryRequest is a marker interface for the typed per-project SDK discovery requests
type discoveryRequest interface {
}
type aliyunSdkClient interface {
ProcessCommonRequest(req *requests.CommonRequest) (response *responses.CommonResponse, err error)
}
// discoveryTool is an object that provides discovery feature
type discoveryTool struct {
req map[string]discoveryRequest // Discovery request (specific per object type)
rateLimit int // Rate limit for API query, as it is limited by API backend
reqDefaultPageSize int // Default page size while querying data from API (how many objects per request)
cli map[string]aliyunSdkClient // API client, which perform discovery request
respRootKey string // Root key in JSON response where to look for discovery data
respObjectIDKey string // Key in an element of the array under the root key that stores the object ID.
// For the majority of cases it is InstanceId; for OSS it is BucketName. This key is also used in dimension filtering
wg sync.WaitGroup // WG for primary discovery goroutine
interval time.Duration // Discovery interval
done chan bool // Done channel to stop primary discovery goroutine
dataChan chan map[string]interface{} // Discovery data
lg telegraf.Logger // Telegraf logger (should be provided)
}
type parsedDResp struct {
data []interface{}
totalCount int
pageSize int
pageNumber int
}
// getRPCReqFromDiscoveryRequest is a utility function mapping between aliyun request primitives.
// discoveryRequest represents the different types of discovery requests.
func getRPCReqFromDiscoveryRequest(req discoveryRequest) (*requests.RpcRequest, error) {
if reflect.ValueOf(req).Type().Kind() != reflect.Ptr ||
reflect.ValueOf(req).IsNil() {
return nil, fmt.Errorf("unexpected type of the discovery request object: %q, %q", reflect.ValueOf(req).Type(), reflect.ValueOf(req).Kind())
}
ptrV := reflect.Indirect(reflect.ValueOf(req))
for i := 0; i < ptrV.NumField(); i++ {
if ptrV.Field(i).Type().String() == "*requests.RpcRequest" {
if !ptrV.Field(i).CanInterface() {
return nil, fmt.Errorf("can't get interface of %q", ptrV.Field(i))
}
rpcReq, ok := ptrV.Field(i).Interface().(*requests.RpcRequest)
if !ok {
return nil, fmt.Errorf("can't convert interface of %q to '*requests.RpcRequest' type", ptrV.Field(i).Interface())
}
return rpcReq, nil
}
}
return nil, fmt.Errorf("didn't find *requests.RpcRequest embedded struct in %q", ptrV.Type())
}
// newDiscoveryTool returns a discovery tool object.
// The object is used to periodically get data about aliyun objects and send this
// data into a channel. The intention is to enrich reported metrics with discovery data.
// Discovery is supported for a limited set of object types (defined by project) and can be extended in the future.
// Discovery can be limited by region; if regions are not set, all regions are queried.
// Requests against the API can incur additional costs; consult the aliyun API documentation.
func newDiscoveryTool(
regions []string,
project string,
lg telegraf.Logger,
credential auth.Credential,
rateLimit int,
discoveryInterval time.Duration,
) (*discoveryTool, error) {
var (
responseRootKey string
responseObjectIDKey string
err error
noDiscoverySupportErr = fmt.Errorf("no discovery support for project %q", project)
)
if len(regions) == 0 {
regions = aliyunRegionList
lg.Infof("'regions' is not provided! Discovery data will be queried across %d regions:\n%s",
len(aliyunRegionList), strings.Join(aliyunRegionList, ","))
}
if rateLimit == 0 { // Can happen due to rounding (the caller passes a fraction of the configured rate limit)
rateLimit = 1
}
dscReq := make(map[string]discoveryRequest, len(regions))
cli := make(map[string]aliyunSdkClient, len(regions))
for _, region := range regions {
switch project {
case "acs_ecs_dashboard":
dscReq[region] = ecs.CreateDescribeInstancesRequest()
responseRootKey = "Instances"
responseObjectIDKey = "InstanceId"
case "acs_rds_dashboard":
dscReq[region] = rds.CreateDescribeDBInstancesRequest()
responseRootKey = "Items"
responseObjectIDKey = "DBInstanceId"
case "acs_slb_dashboard":
dscReq[region] = slb.CreateDescribeLoadBalancersRequest()
responseRootKey = "LoadBalancers"
responseObjectIDKey = "LoadBalancerId"
case "acs_memcache":
return nil, noDiscoverySupportErr
case "acs_ocs":
return nil, noDiscoverySupportErr
case "acs_oss":
// oss is complicated and uses its own response format
return nil, noDiscoverySupportErr
case "acs_vpc_eip":
dscReq[region] = vpc.CreateDescribeEipAddressesRequest()
responseRootKey = "EipAddresses"
responseObjectIDKey = "AllocationId"
case "acs_kvstore":
return nil, noDiscoverySupportErr
case "acs_mns_new":
return nil, noDiscoverySupportErr
case "acs_cdn":
// API replies use their own format.
return nil, noDiscoverySupportErr
case "acs_polardb":
return nil, noDiscoverySupportErr
case "acs_gdb":
return nil, noDiscoverySupportErr
case "acs_ads":
return nil, noDiscoverySupportErr
case "acs_mongodb":
return nil, noDiscoverySupportErr
case "acs_express_connect":
return nil, noDiscoverySupportErr
case "acs_fc":
return nil, noDiscoverySupportErr
case "acs_nat_gateway":
return nil, noDiscoverySupportErr
case "acs_sls_dashboard":
return nil, noDiscoverySupportErr
case "acs_containerservice_dashboard":
return nil, noDiscoverySupportErr
case "acs_vpn":
return nil, noDiscoverySupportErr
case "acs_bandwidth_package":
return nil, noDiscoverySupportErr
case "acs_cen":
return nil, noDiscoverySupportErr
case "acs_ens":
return nil, noDiscoverySupportErr
case "acs_opensearch":
return nil, noDiscoverySupportErr
case "acs_scdn":
return nil, noDiscoverySupportErr
case "acs_drds":
return nil, noDiscoverySupportErr
case "acs_iot":
return nil, noDiscoverySupportErr
case "acs_directmail":
return nil, noDiscoverySupportErr
case "acs_elasticsearch":
return nil, noDiscoverySupportErr
case "acs_ess_dashboard":
return nil, noDiscoverySupportErr
case "acs_streamcompute":
return nil, noDiscoverySupportErr
case "acs_global_acceleration":
return nil, noDiscoverySupportErr
case "acs_hitsdb":
return nil, noDiscoverySupportErr
case "acs_kafka":
return nil, noDiscoverySupportErr
case "acs_openad":
return nil, noDiscoverySupportErr
case "acs_pcdn":
return nil, noDiscoverySupportErr
case "acs_dcdn":
return nil, noDiscoverySupportErr
case "acs_petadata":
return nil, noDiscoverySupportErr
case "acs_videolive":
return nil, noDiscoverySupportErr
case "acs_hybriddb":
return nil, noDiscoverySupportErr
case "acs_adb":
return nil, noDiscoverySupportErr
case "acs_mps":
return nil, noDiscoverySupportErr
case "acs_maxcompute_prepay":
return nil, noDiscoverySupportErr
case "acs_hdfs":
return nil, noDiscoverySupportErr
case "acs_ddh":
return nil, noDiscoverySupportErr
case "acs_hbr":
return nil, noDiscoverySupportErr
case "acs_hdr":
return nil, noDiscoverySupportErr
case "acs_cds":
return nil, noDiscoverySupportErr
default:
return nil, fmt.Errorf("project %q is not recognized by discovery", project)
}
cli[region], err = sdk.NewClientWithOptions(region, sdk.NewConfig(), credential)
if err != nil {
return nil, err
}
}
if len(dscReq) == 0 || len(cli) == 0 {
return nil, fmt.Errorf("can't build discovery request for project: %q, regions: %v", project, regions)
}
return &discoveryTool{
req: dscReq,
cli: cli,
respRootKey: responseRootKey,
respObjectIDKey: responseObjectIDKey,
rateLimit: rateLimit,
interval: discoveryInterval,
reqDefaultPageSize: 20,
dataChan: make(chan map[string]interface{}, 1),
lg: lg,
}, nil
}
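// Minimal usage sketch (mirrors how the plugin and its tests wire this up;
// the credential is assumed to come from the aliyun SDK provider chain):
//   dt, err := newDiscoveryTool([]string{"cn-shanghai"}, "acs_slb_dashboard",
//           lg, credential, rateLimit, time.Minute)
//   if err == nil {
//           dt.start()       // periodic discovery, results arrive on dt.dataChan
//           defer dt.stop()
//   }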
func (dt *discoveryTool) parseDiscoveryResponse(resp *responses.CommonResponse) (*parsedDResp, error) {
var (
fullOutput = make(map[string]interface{})
data []byte
foundDataItem bool
foundRootKey bool
pdResp = &parsedDResp{}
)
data = resp.GetHttpContentBytes()
if data == nil { // No data
return nil, errors.New("no data in response to be parsed")
}
if err := json.Unmarshal(data, &fullOutput); err != nil {
return nil, fmt.Errorf("can't parse JSON from discovery response: %w", err)
}
for key, val := range fullOutput {
switch key {
case dt.respRootKey:
foundRootKey = true
rootKeyVal, ok := val.(map[string]interface{})
if !ok {
return nil, fmt.Errorf("content of root key %q, is not an object: %q", key, val)
}
// It should contain the array with discovered data
for _, item := range rootKeyVal {
if pdResp.data, foundDataItem = item.([]interface{}); foundDataItem {
break
}
}
if !foundDataItem {
return nil, fmt.Errorf("didn't find array item in root key %q", key)
}
case "TotalCount", "TotalRecordCount":
pdResp.totalCount = int(val.(float64))
case "PageSize", "PageRecordCount":
pdResp.pageSize = int(val.(float64))
case "PageNumber":
pdResp.pageNumber = int(val.(float64))
}
}
if !foundRootKey {
return nil, fmt.Errorf("didn't find root key %q in discovery response", dt.respRootKey)
}
return pdResp, nil
}
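// For reference, a trimmed example of the response shape this parser expects
// (taken from the SLB discovery test data in this commit):
//   {
//     "LoadBalancers": {"LoadBalancer": [{"LoadBalancerId": "bla"}]},
//     "TotalCount": 1, "PageSize": 1, "PageNumber": 1
//   }
// Here respRootKey is "LoadBalancers" and the inner array holds the data items.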
func (dt *discoveryTool) getDiscoveryData(cli aliyunSdkClient, req *requests.CommonRequest, lmtr chan bool) (map[string]interface{}, error) {
var (
err error
resp *responses.CommonResponse
pDResp *parsedDResp
discoveryData []interface{}
totalCount int
pageNumber int
)
defer delete(req.QueryParams, "PageNumber")
for {
if lmtr != nil {
<-lmtr // Rate limiting
}
resp, err = cli.ProcessCommonRequest(req)
if err != nil {
return nil, err
}
pDResp, err = dt.parseDiscoveryResponse(resp)
if err != nil {
return nil, err
}
discoveryData = append(discoveryData, pDResp.data...)
pageNumber = pDResp.pageNumber
totalCount = pDResp.totalCount
// Pagination
pageNumber++
req.QueryParams["PageNumber"] = strconv.Itoa(pageNumber)
if len(discoveryData) == totalCount { // All data received
// Map data to the appropriate shape before return
preparedData := make(map[string]interface{}, len(discoveryData))
for _, raw := range discoveryData {
elem, ok := raw.(map[string]interface{})
if !ok {
return nil, errors.New("can't parse input data element, not a map[string]interface{} type")
}
if objectID, ok := elem[dt.respObjectIDKey].(string); ok {
preparedData[objectID] = elem
}
}
return preparedData, nil
}
}
}
func (dt *discoveryTool) getDiscoveryDataAcrossRegions(lmtr chan bool) (map[string]interface{}, error) {
resultData := make(map[string]interface{})
for region, cli := range dt.cli {
// Building common request, as the code below is the same no matter
// which aliyun object type (project) is used
dscReq, ok := dt.req[region]
if !ok {
return nil, fmt.Errorf("error building common discovery request: not valid region %q", region)
}
rpcReq, err := getRPCReqFromDiscoveryRequest(dscReq)
if err != nil {
return nil, err
}
commonRequest := requests.NewCommonRequest()
commonRequest.Method = rpcReq.GetMethod()
commonRequest.Product = rpcReq.GetProduct()
commonRequest.Domain = rpcReq.GetDomain()
commonRequest.Version = rpcReq.GetVersion()
commonRequest.Scheme = rpcReq.GetScheme()
commonRequest.ApiName = rpcReq.GetActionName()
commonRequest.QueryParams = rpcReq.QueryParams
commonRequest.QueryParams["PageSize"] = strconv.Itoa(dt.reqDefaultPageSize)
commonRequest.TransToAcsRequest()
// Get discovery data using common request
data, err := dt.getDiscoveryData(cli, commonRequest, lmtr)
if err != nil {
return nil, err
}
for k, v := range data {
resultData[k] = v
}
}
return resultData, nil
}
// start the discovery polling; in case something new is found, it will be reported back through `dataChan`
func (dt *discoveryTool) start() {
var (
err error
data map[string]interface{}
lastData map[string]interface{}
)
// Initializing channel
dt.done = make(chan bool)
dt.wg.Add(1)
go func() {
defer dt.wg.Done()
ticker := time.NewTicker(dt.interval)
defer ticker.Stop()
lmtr := limiter.NewRateLimiter(dt.rateLimit, time.Second)
defer lmtr.Stop()
for {
select {
case <-dt.done:
return
case <-ticker.C:
data, err = dt.getDiscoveryDataAcrossRegions(lmtr.C)
if err != nil {
dt.lg.Errorf("Can't get discovery data: %v", err)
continue
}
if !reflect.DeepEqual(data, lastData) {
lastData = make(map[string]interface{}, len(data))
for k, v := range data {
lastData[k] = v
}
// send discovery data in blocking mode
dt.dataChan <- data
}
}
}
}()
}
// stop the discovery loop, making sure all data is read from 'dataChan'
func (dt *discoveryTool) stop() {
close(dt.done)
// Shutdown timer
timer := time.NewTimer(time.Second * 3)
defer timer.Stop()
L:
for { // Unblock go routine by reading from dt.dataChan
select {
case <-timer.C:
break L
case <-dt.dataChan:
}
}
dt.wg.Wait()
}


@ -0,0 +1,120 @@
# Pull Metric Statistics from Aliyun CMS
[[inputs.aliyuncms]]
## Aliyun Credentials
## Credentials are loaded in the following order
## 1) Ram RoleArn credential
## 2) AccessKey STS token credential
## 3) AccessKey credential
## 4) Ecs Ram Role credential
## 5) RSA keypair credential
## 6) Environment variables credential
## 7) Instance metadata credential
# access_key_id = ""
# access_key_secret = ""
# access_key_sts_token = ""
# role_arn = ""
# role_session_name = ""
# private_key = ""
# public_key_id = ""
# role_name = ""
## Specify ali cloud regions to be queried for metric and object discovery
## If not set, all supported regions (see below) will be covered; this can
## put a significant load on the API, so the recommendation is to
## limit the list as much as possible.
## Allowed values: https://www.alibabacloud.com/help/zh/doc-detail/40654.htm
## Default supported regions are:
## cn-qingdao,cn-beijing,cn-zhangjiakou,cn-huhehaote,cn-hangzhou,
## cn-shanghai, cn-shenzhen, cn-heyuan,cn-chengdu,cn-hongkong,
## ap-southeast-1,ap-southeast-2,ap-southeast-3,ap-southeast-5,
## ap-south-1,ap-northeast-1, us-west-1,us-east-1,eu-central-1,
## eu-west-1,me-east-1
##
## From a discovery perspective it sets the scope for object discovery;
## the discovered info can be used to enrich the metrics with object
## attributes/tags. Discovery is not supported for all projects.
## Currently, discovery supported for the following projects:
## - acs_ecs_dashboard
## - acs_rds_dashboard
## - acs_slb_dashboard
## - acs_vpc_eip
regions = ["cn-hongkong"]
## Requested AliyunCMS aggregation Period (required)
## The period must be multiples of 60s and the minimum for AliyunCMS metrics
## is 1 minute (60s). However, not all metrics are made available at the
## one minute period. Some are collected at 3 minute, 5 minute, or larger
## intervals.
## See: https://help.aliyun.com/document_detail/51936.html?spm=a2c4g.11186623.2.18.2bc1750eeOw1Pv
## Note that if a period is configured that is smaller than the minimum for
## a particular metric, that metric will not be returned by Aliyun's
## OpenAPI and will not be collected by Telegraf.
period = "5m"
## Collection Delay (required)
## The delay must account for metrics availability via AliyunCMS API.
delay = "1m"
## Recommended: use a metric 'interval' that is a multiple of 'period'
## to avoid gaps or overlaps in the pulled data
interval = "5m"
## Metric Statistic Project (required)
project = "acs_slb_dashboard"
## Maximum requests per second, default value is 200
ratelimit = 200
## How often the discovery API call executed (default 1m)
#discovery_interval = "1m"
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Metrics to Pull
## At least one metrics definition required
[[inputs.aliyuncms.metrics]]
## Metric names to be requested.
## Description can be found here (per project):
## https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq
names = ["InstanceActiveConnection", "InstanceNewConnection"]
## Dimension filters for Metric (optional)
## This allows getting additional metric dimensions. If a dimension is not
## specified it can still be returned, or the data can be aggregated; it depends
## on the particular metric. You can find details here:
## https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq
##
## Note that by default the dimension filter includes the list of discovered
## objects in scope (if discovery is enabled). Values specified here will
## be added to the list of discovered objects. You can specify either a
## single dimension:
# dimensions = '{"instanceId": "p-example"}'
## Or you can specify several dimensions at once:
# dimensions = '[{"instanceId": "p-example"},{"instanceId": "q-example"}]'
## Tag Query Path
## The following tags are added by default:
## * regionId (if discovery enabled)
## * userId
## * instanceId
## Enrichment tags can be added from discovery (if supported).
## The notation is
## <measurement_tag_name>:<JMES query path (https://jmespath.org/tutorial.html)>
## To figure out which fields are available, consult the
## Describe<ObjectType> API per project. For example, for SLB see:
## https://api.aliyun.com/#/?product=Slb&version=2014-05-15&api=DescribeLoadBalancers&params={}&tab=MOCK&lang=GO
# tag_query_path = [
# "address:Address",
# "name:LoadBalancerName",
# "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]"
# ]
## Allow metrics without discovery data, if discovery is enabled.
## If set to true, metrics without discovery data will be emitted; otherwise they are dropped.
## This can be of help when debugging dimension filters, or when discovery
## scope only partially covers the monitoring scope.
# allow_dps_without_discovery = false


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.activemq
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/activemq" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.aerospike
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/aerospike" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.aliyuncms
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/aliyuncms" // register plugin


@ -0,0 +1 @@
package all


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.amd_rocm_smi
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/amd_rocm_smi" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.amqp_consumer
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/amqp_consumer" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.apache
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/apache" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.apcupsd
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/apcupsd" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.aurora
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/aurora" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.azure_monitor
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/azure_monitor" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.azure_storage_queue
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/azure_storage_queue" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.bcache
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/bcache" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.beanstalkd
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/beanstalkd" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.beat
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/beat" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.bind
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/bind" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.bond
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/bond" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.burrow
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/burrow" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.ceph
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/ceph" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.cgroup
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/cgroup" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.chrony
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/chrony" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.cisco_telemetry_mdt
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/cisco_telemetry_mdt" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.clickhouse
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/clickhouse" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.cloud_pubsub
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/cloud_pubsub" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.cloud_pubsub_push
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/cloud_pubsub_push" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.cloudwatch
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/cloudwatch" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.cloudwatch_metric_streams
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/cloudwatch_metric_streams" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.conntrack
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/conntrack" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.consul
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/consul" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.consul_agent
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/consul_agent" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.couchbase
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/couchbase" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.couchdb
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/couchdb" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.cpu
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/cpu" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.csgo
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/csgo" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.ctrlx_datalayer
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/ctrlx_datalayer" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.dcos
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/dcos" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.directory_monitor
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/directory_monitor" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.disk
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/disk" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.diskio
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/diskio" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.disque
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/disque" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.dmcache
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/dmcache" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.dns_query
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/dns_query" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.docker
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/docker" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.docker_log
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/docker_log" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.dovecot
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/dovecot" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.dpdk
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/dpdk" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.ecs
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/ecs" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.elasticsearch
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.elasticsearch_query
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch_query" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.ethtool
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/ethtool" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.eventhub_consumer
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/eventhub_consumer" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.exec
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/exec" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.execd
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/execd" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.fail2ban
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/fail2ban" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.fibaro
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/fibaro" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.file
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/file" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.filecount
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/filecount" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.filestat
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/filestat" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.fireboard
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/fireboard" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.firehose
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/firehose" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.fluentd
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/fluentd" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.github
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/github" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.gnmi
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/gnmi" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.google_cloud_storage
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/google_cloud_storage" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.graylog
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/graylog" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.haproxy
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/haproxy" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.hddtemp
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/hddtemp" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.http
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/http" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.http_listener_v2
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/http_listener_v2" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.http_response
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/http_response" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.huebridge
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/huebridge" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.hugepages
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/hugepages" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.icinga2
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/icinga2" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.infiniband
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/infiniband" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.influxdb
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/influxdb" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.influxdb_listener
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/influxdb_listener" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.influxdb_v2_listener
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/influxdb_v2_listener" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.intel_baseband
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/intel_baseband" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.intel_dlb
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/intel_dlb" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.intel_pmt
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/intel_pmt" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.intel_pmu
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/intel_pmu" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.intel_powerstat
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/intel_powerstat" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.intel_rdt
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/intel_rdt" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.internal
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/internal" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.internet_speed
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/internet_speed" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.interrupts
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/interrupts" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.ipmi_sensor
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor" // register plugin


@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.ipset
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/ipset" // register plugin

Some files were not shown because too many files have changed in this diff.