
Adding upstream version 1.34.4.

Signed-off-by: Daniel Baumann <daniel@debian.org>
Committed by Daniel Baumann on 2025-05-24 07:26:29 +02:00
parent e393c3af3f
commit 4978089aab
Signed by: daniel (GPG key ID: FBB4F0E80A80222F)
4963 changed files with 677545 additions and 0 deletions


@@ -0,0 +1,399 @@
# Kubernetes Inventory Input Plugin
This plugin gathers metrics from [Kubernetes][kubernetes] resources.
> [!NOTE]
> This plugin requires Kubernetes version 1.11+.
The gathered resources include, for example, daemon sets, deployments,
endpoints, ingress, nodes, persistent volumes and many more.
> [!CRITICAL]
> This plugin produces high-cardinality data which, if not controlled for, will
> cause high load on your database. Please make sure to [filter][filtering] the
> produced metrics or configure your database to avoid cardinality issues!
⭐ Telegraf v1.10.0
🏷️ containers
💻 all
[kubernetes]: https://kubernetes.io/
[filtering]: /docs/CONFIGURATION.md#metric-filtering
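
To keep cardinality under control, the plugin's own `resource_include` /
`selector_exclude` options can be combined with Telegraf's generic metric
filtering. A minimal sketch (the chosen resources and the dropped tag are
illustrative only, not a recommendation):

```toml
[[inputs.kube_inventory]]
  ## Gather only the resources you actually use downstream.
  resource_include = [ "deployments", "pods" ]
  ## Do not emit any selector tags (this is the default).
  selector_exclude = ["*"]
  ## Drop remaining high-cardinality tags with Telegraf's generic modifiers.
  tagexclude = ["pod_name"]
```
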
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used to
modify metrics, tags, and fields, create aliases, and configure ordering, etc.
See [CONFIGURATION.md][CONFIGURATION.md] for more details.
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
## Configuration
```toml @sample.conf
# Read metrics from the Kubernetes api
[[inputs.kube_inventory]]
## URL for the Kubernetes API.
## If empty in-cluster config with POD's service account token will be used.
# url = ""
## URL for the kubelet; if set, it will be used to collect pod resource metrics
# url_kubelet = "http://127.0.0.1:10255"
## Namespace to use. Set to "" to use all namespaces.
# namespace = "default"
## Node name to filter to. No filtering by default.
# node_name = ""
## Use bearer token for authorization.
## Ignored if url is empty and in-cluster config is used.
# bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token"
## Set response_timeout (default 5 seconds)
# response_timeout = "5s"
## Optional resources to exclude from gathering
## Leave empty to try to gather everything available.
## Values can be - "daemonsets", "deployments", "endpoints", "ingress",
## "nodes", "persistentvolumes", "persistentvolumeclaims", "pods", "services",
## "statefulsets"
# resource_exclude = [ "deployments", "nodes", "statefulsets" ]
## Optional resources to include when gathering
## Overrides resource_exclude if both are set.
# resource_include = [ "deployments", "nodes", "statefulsets" ]
## Selectors to include and exclude as tags. Globs accepted.
## Note that an empty array for both will include all selectors as tags.
## selector_exclude overrides selector_include if both are set.
# selector_include = []
# selector_exclude = ["*"]
## Optional TLS Config
## Trusted root certificates for server
# tls_ca = "/path/to/cafile"
## Used for TLS client certificate authentication
# tls_cert = "/path/to/certfile"
## Used for TLS client certificate authentication
# tls_key = "/path/to/keyfile"
## Send the specified TLS server name via SNI
# tls_server_name = "kubernetes.example.com"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## Uncomment to remove deprecated metrics.
# fieldexclude = ["terminated_reason"]
```
## Kubernetes Permissions
If using [RBAC authorization][rbac], you will need to create a cluster role to
list "persistentvolumes" and "nodes". You will then need to make an [aggregated
ClusterRole][agg] that will eventually be bound to a user or group.
[rbac]: https://kubernetes.io/docs/reference/access-authn-authz/rbac/
[agg]: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles
```yaml
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: influx:cluster:viewer
labels:
rbac.authorization.k8s.io/aggregate-view-telegraf: "true"
rules:
- apiGroups: [""]
resources: ["persistentvolumes", "nodes"]
verbs: ["get", "list"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: influx:telegraf
aggregationRule:
clusterRoleSelectors:
- matchLabels:
rbac.authorization.k8s.io/aggregate-view-telegraf: "true"
- matchLabels:
rbac.authorization.k8s.io/aggregate-to-view: "true"
rules: [] # Rules are automatically filled in by the controller manager.
```
Bind the newly created aggregated ClusterRole with the following config file,
updating the subjects as needed.
```yaml
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: influx:telegraf:viewer
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: influx:telegraf
subjects:
- kind: ServiceAccount
name: telegraf
namespace: default
```
## Quickstart in k3s
When monitoring [k3s](https://k3s.io) server instances, you can re-use the
already generated administration token. This is less secure than using a
dedicated, more restrictive telegraf user, but more convenient to set up.
```console
# replace `telegraf` with the user the telegraf process is running as
$ install -o telegraf -m400 /var/lib/rancher/k3s/server/token /run/telegraf-kubernetes-token
$ install -o telegraf -m400 /var/lib/rancher/k3s/server/tls/client-admin.crt /run/telegraf-kubernetes-cert
$ install -o telegraf -m400 /var/lib/rancher/k3s/server/tls/client-admin.key /run/telegraf-kubernetes-key
```
```toml
[[inputs.kube_inventory]]
bearer_token = "/run/telegraf-kubernetes-token"
tls_cert = "/run/telegraf-kubernetes-cert"
tls_key = "/run/telegraf-kubernetes-key"
```
## Metrics
- kubernetes_daemonset
- tags:
- daemonset_name
- namespace
- selector (\*varies)
- fields:
- generation
- current_number_scheduled
- desired_number_scheduled
- number_available
- number_misscheduled
- number_ready
- number_unavailable
- updated_number_scheduled
- kubernetes_deployment
- tags:
- deployment_name
- namespace
- selector (\*varies)
- fields:
- replicas_available
- replicas_unavailable
- created
- kubernetes_endpoints
- tags:
- endpoint_name
- namespace
- hostname
- node_name
- port_name
- port_protocol
- kind (\*varies)
- fields:
- created
- generation
- ready
- port
- kubernetes_ingress
- tags:
- ingress_name
- namespace
- hostname
- ip
- backend_service_name
- path
- host
- fields:
- created
- generation
- backend_service_port
- tls
- kubernetes_node
- tags:
- node_name
- status
    - condition
    - cluster_namespace
    - version
- fields:
- capacity_cpu_cores
- capacity_millicpu_cores
- capacity_memory_bytes
- capacity_pods
- allocatable_cpu_cores
- allocatable_millicpu_cores
- allocatable_memory_bytes
- allocatable_pods
    - status_condition
    - ready
    - spec_unschedulable
- node_count
- kubernetes_persistentvolume
- tags:
- pv_name
- phase
- storageclass
- fields:
- phase_type (int, [see below](#pv-phase_type))
- kubernetes_persistentvolumeclaim
- tags:
- pvc_name
- namespace
- phase
- storageclass
- selector (\*varies)
- fields:
- phase_type (int, [see below](#pvc-phase_type))
- kubernetes_pod_container
- tags:
- container_name
- namespace
- node_name
- pod_name
- node_selector (\*varies)
- phase
- state
- readiness
- condition
- fields:
- restarts_total
- state_code
- state_reason
- phase_reason
- terminated_reason (string, deprecated in 1.15: use `state_reason` instead)
- resource_requests_millicpu_units
- resource_requests_memory_bytes
- resource_limits_millicpu_units
- resource_limits_memory_bytes
- status_condition
- kubernetes_service
- tags:
- service_name
- namespace
- port_name
- port_protocol
- external_name
- cluster_ip
- selector (\*varies)
- fields
- created
- generation
- port
- target_port
- kubernetes_statefulset
- tags:
- statefulset_name
- namespace
- selector (\*varies)
- fields:
- created
- generation
- replicas
- replicas_current
- replicas_ready
- replicas_updated
- spec_replicas
- observed_generation
- kubernetes_resourcequota
- tags:
- resource
- namespace
- fields:
- hard_cpu_limits
- hard_cpu_requests
- hard_memory_limit
- hard_memory_requests
- hard_pods
- used_cpu_limits
- used_cpu_requests
- used_memory_limits
- used_memory_requests
- used_pods
- kubernetes_certificate
- tags:
- common_name
- signature_algorithm
- public_key_algorithm
- issuer_common_name
- san
- verification
- name
- namespace
- fields:
- age
- expiry
- startdate
- enddate
- verification_code
### kubernetes node status `status`
The node status `ready` can take three different values:
| Tag value | Corresponding field value | Meaning |
| --------- | ------------------------- | -------- |
| ready | 0 | NotReady |
| ready | 1 | Ready |
| ready | 2 | Unknown |
### pv `phase_type`
The persistentvolume phase is saved in the `phase` tag, with a correlated
numeric field called `phase_type` whose value corresponds to that tag:
| Tag value | Corresponding field value |
| --------- | ------------------------- |
| bound | 0 |
| failed | 1 |
| pending | 2 |
| released | 3 |
| available | 4 |
| unknown | 5 |
### pvc `phase_type`
The persistentvolumeclaim phase is saved in the `phase` tag, with a correlated
numeric field called `phase_type` whose value corresponds to that tag:
| Tag value | Corresponding field value |
| --------- | ------------------------- |
| bound | 0 |
| lost | 1 |
| pending | 2 |
| unknown | 3 |
## Example Output
```text
kubernetes_configmap,configmap_name=envoy-config,namespace=default,resource_version=56593031 created=1544103867000000000i 1547597616000000000
kubernetes_daemonset,daemonset_name=telegraf,selector_select1=s1,namespace=logging number_unavailable=0i,desired_number_scheduled=11i,number_available=11i,number_misscheduled=8i,number_ready=11i,updated_number_scheduled=11i,created=1527758699000000000i,generation=16i,current_number_scheduled=11i 1547597616000000000
kubernetes_deployment,deployment_name=deployd,selector_select1=s1,namespace=default replicas_unavailable=0i,created=1544103082000000000i,replicas_available=1i 1547597616000000000
kubernetes_node,host=vjain node_count=8i 1628918652000000000
kubernetes_node,condition=Ready,host=vjain,node_name=ip-172-17-0-2.internal,status=True status_condition=1i 1629177980000000000
kubernetes_node,cluster_namespace=tools,condition=Ready,host=vjain,node_name=ip-172-17-0-2.internal,status=True allocatable_cpu_cores=4i,allocatable_memory_bytes=7186567168i,allocatable_millicpu_cores=4000i,allocatable_pods=110i,capacity_cpu_cores=4i,capacity_memory_bytes=7291424768i,capacity_millicpu_cores=4000i,capacity_pods=110i,spec_unschedulable=0i,status_condition=1i 1628918652000000000
kubernetes_resourcequota,host=vjain,namespace=default,resource=pods-high hard_cpu=1000i,hard_memory=214748364800i,hard_pods=10i,used_cpu=0i,used_memory=0i,used_pods=0i 1629110393000000000
kubernetes_resourcequota,host=vjain,namespace=default,resource=pods-low hard_cpu=5i,hard_memory=10737418240i,hard_pods=10i,used_cpu=0i,used_memory=0i,used_pods=0i 1629110393000000000
kubernetes_persistentvolume,phase=Released,pv_name=pvc-aaaaaaaa-bbbb-cccc-1111-222222222222,storageclass=ebs-1-retain phase_type=3i 1547597616000000000
kubernetes_persistentvolumeclaim,namespace=default,phase=Bound,pvc_name=data-etcd-0,selector_select1=s1,storageclass=ebs-1-retain phase_type=0i 1547597615000000000
kubernetes_pod,namespace=default,node_name=ip-172-17-0-2.internal,pod_name=tick1 last_transition_time=1547578322000000000i,ready="false" 1547597616000000000
kubernetes_service,cluster_ip=172.29.61.80,namespace=redis-cache-0001,port_name=redis,port_protocol=TCP,selector_app=myapp,selector_io.kompose.service=redis,selector_role=slave,service_name=redis-slave created=1588690034000000000i,generation=0i,port=6379i,target_port=0i 1547597616000000000
kubernetes_pod_container,condition=Ready,host=vjain,pod_name=uefi-5997f76f69-xzljt,status=True status_condition=1i 1629177981000000000
kubernetes_pod_container,container_name=telegraf,namespace=default,node_name=ip-172-17-0-2.internal,node_selector_node-role.kubernetes.io/compute=true,pod_name=tick1,phase=Running,state=running,readiness=ready resource_requests_cpu_units=0.1,resource_limits_memory_bytes=524288000,resource_limits_cpu_units=0.5,restarts_total=0i,state_code=0i,state_reason="",phase_reason="",resource_requests_memory_bytes=524288000 1547597616000000000
kubernetes_statefulset,namespace=default,selector_select1=s1,statefulset_name=etcd replicas_updated=3i,spec_replicas=3i,observed_generation=1i,created=1544101669000000000i,generation=1i,replicas=3i,replicas_current=3i,replicas_ready=3i 1547597616000000000
```


@@ -0,0 +1,94 @@
package kube_inventory
import (
"context"
"crypto/x509"
"encoding/pem"
"strings"
"time"
corev1 "k8s.io/api/core/v1"
"github.com/influxdata/telegraf"
)
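// collectSecrets lists the TLS secrets visible to the configured client and
// emits one set of certificate metrics per secret; client errors are forwarded
// to the accumulator.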
func collectSecrets(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) {
list, err := ki.client.getTLSSecrets(ctx)
if err != nil {
acc.AddError(err)
return
}
for _, i := range list.Items {
gatherCertificates(i, acc)
}
}
func getFields(cert *x509.Certificate, now time.Time) map[string]interface{} {
age := int(now.Sub(cert.NotBefore).Seconds())
expiry := int(cert.NotAfter.Sub(now).Seconds())
startdate := cert.NotBefore.Unix()
enddate := cert.NotAfter.Unix()
fields := map[string]interface{}{
"age": age,
"expiry": expiry,
"startdate": startdate,
"enddate": enddate,
}
return fields
}
func getTags(cert *x509.Certificate) map[string]string {
tags := map[string]string{
"common_name": cert.Subject.CommonName,
"signature_algorithm": cert.SignatureAlgorithm.String(),
"public_key_algorithm": cert.PublicKeyAlgorithm.String(),
}
tags["issuer_common_name"] = cert.Issuer.CommonName
san := append(cert.DNSNames, cert.EmailAddresses...)
for _, ip := range cert.IPAddresses {
san = append(san, ip.String())
}
for _, uri := range cert.URIs {
san = append(san, uri.String())
}
tags["san"] = strings.Join(san, ",")
return tags
}
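// gatherCertificates decodes the first PEM block of a secret's "tls.crt" entry,
// records age/expiry/startdate/enddate fields, tags the metric with the
// certificate's subject, issuer, algorithms and SANs, and encodes chain
// verification as verification_code 0 (valid) or 1 (invalid).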
func gatherCertificates(r corev1.Secret, acc telegraf.Accumulator) {
now := time.Now()
for resourceName, val := range r.Data {
if resourceName != "tls.crt" {
continue
}
block, _ := pem.Decode(val)
if block == nil {
return
}
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
return
}
fields := getFields(cert, now)
tags := getTags(cert)
tags["name"] = r.Name
tags["namespace"] = r.Namespace
opts := x509.VerifyOptions{
KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny},
}
_, err = cert.Verify(opts)
if err == nil {
tags["verification"] = "valid"
fields["verification_code"] = 0
} else {
tags["verification"] = "invalid"
fields["verification_code"] = 1
}
acc.AddFields(certificateMeasurement, fields, tags)
}
}


@@ -0,0 +1,165 @@
package kube_inventory
import (
"context"
"net/http"
"time"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
discoveryv1 "k8s.io/api/discovery/v1"
netv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/plugins/common/tls"
)
type client struct {
namespace string
timeout time.Duration
*kubernetes.Clientset
}
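// newClient returns a Kubernetes clientset built either from the in-cluster
// service-account configuration (when baseURL is empty) or from the given URL,
// TLS settings and bearer token; a token file takes precedence over a literal
// token string.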
func newClient(baseURL, namespace, bearerTokenFile, bearerToken string, timeout time.Duration, tlsConfig tls.ClientConfig) (*client, error) {
var clientConfig *rest.Config
var err error
if baseURL == "" {
clientConfig, err = rest.InClusterConfig()
if err != nil {
return nil, err
}
} else {
clientConfig = &rest.Config{
TLSClientConfig: rest.TLSClientConfig{
ServerName: tlsConfig.ServerName,
Insecure: tlsConfig.InsecureSkipVerify,
CAFile: tlsConfig.TLSCA,
CertFile: tlsConfig.TLSCert,
KeyFile: tlsConfig.TLSKey,
},
Host: baseURL,
ContentConfig: rest.ContentConfig{},
}
if bearerTokenFile != "" {
clientConfig.BearerTokenFile = bearerTokenFile
} else if bearerToken != "" {
clientConfig.BearerToken = bearerToken
}
}
c, err := kubernetes.NewForConfig(clientConfig)
if err != nil {
return nil, err
}
return &client{
Clientset: c,
timeout: timeout,
namespace: namespace,
}, nil
}
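// newHTTPClient returns a plain *http.Client for the optional kubelet endpoint,
// honoring the TLS settings, bearer token file and response timeout.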
func newHTTPClient(tlsConfig tls.ClientConfig, bearerTokenFile string, responseTimeout config.Duration) (*http.Client, error) {
tlsCfg, err := tlsConfig.TLSConfig()
if err != nil {
return nil, err
}
clientConfig := &rest.Config{
Transport: &http.Transport{
TLSClientConfig: tlsCfg,
},
ContentConfig: rest.ContentConfig{},
Timeout: time.Duration(responseTimeout),
BearerTokenFile: bearerTokenFile,
}
return rest.HTTPClientFor(clientConfig)
}
func (c *client) getDaemonSets(ctx context.Context) (*appsv1.DaemonSetList, error) {
ctx, cancel := context.WithTimeout(ctx, c.timeout)
defer cancel()
return c.AppsV1().DaemonSets(c.namespace).List(ctx, metav1.ListOptions{})
}
func (c *client) getDeployments(ctx context.Context) (*appsv1.DeploymentList, error) {
ctx, cancel := context.WithTimeout(ctx, c.timeout)
defer cancel()
return c.AppsV1().Deployments(c.namespace).List(ctx, metav1.ListOptions{})
}
func (c *client) getEndpoints(ctx context.Context) (*discoveryv1.EndpointSliceList, error) {
ctx, cancel := context.WithTimeout(ctx, c.timeout)
defer cancel()
return c.DiscoveryV1().EndpointSlices(c.namespace).List(ctx, metav1.ListOptions{})
}
func (c *client) getIngress(ctx context.Context) (*netv1.IngressList, error) {
ctx, cancel := context.WithTimeout(ctx, c.timeout)
defer cancel()
return c.NetworkingV1().Ingresses(c.namespace).List(ctx, metav1.ListOptions{})
}
func (c *client) getNodes(ctx context.Context, name string) (*corev1.NodeList, error) {
ctx, cancel := context.WithTimeout(ctx, c.timeout)
defer cancel()
var fieldSelector string
if name != "" {
fieldSelector = "metadata.name=" + name
}
return c.CoreV1().Nodes().List(ctx, metav1.ListOptions{FieldSelector: fieldSelector})
}
func (c *client) getPersistentVolumes(ctx context.Context) (*corev1.PersistentVolumeList, error) {
ctx, cancel := context.WithTimeout(ctx, c.timeout)
defer cancel()
return c.CoreV1().PersistentVolumes().List(ctx, metav1.ListOptions{})
}
func (c *client) getPersistentVolumeClaims(ctx context.Context) (*corev1.PersistentVolumeClaimList, error) {
ctx, cancel := context.WithTimeout(ctx, c.timeout)
defer cancel()
return c.CoreV1().PersistentVolumeClaims(c.namespace).List(ctx, metav1.ListOptions{})
}
func (c *client) getPods(ctx context.Context, nodeName string) (*corev1.PodList, error) {
ctx, cancel := context.WithTimeout(ctx, c.timeout)
defer cancel()
var fieldSelector string
if nodeName != "" {
fieldSelector = "spec.nodeName=" + nodeName
}
return c.CoreV1().Pods(c.namespace).List(ctx, metav1.ListOptions{FieldSelector: fieldSelector})
}
func (c *client) getServices(ctx context.Context) (*corev1.ServiceList, error) {
ctx, cancel := context.WithTimeout(ctx, c.timeout)
defer cancel()
return c.CoreV1().Services(c.namespace).List(ctx, metav1.ListOptions{})
}
func (c *client) getStatefulSets(ctx context.Context) (*appsv1.StatefulSetList, error) {
ctx, cancel := context.WithTimeout(ctx, c.timeout)
defer cancel()
return c.AppsV1().StatefulSets(c.namespace).List(ctx, metav1.ListOptions{})
}
func (c *client) getResourceQuotas(ctx context.Context) (*corev1.ResourceQuotaList, error) {
ctx, cancel := context.WithTimeout(ctx, c.timeout)
defer cancel()
return c.CoreV1().ResourceQuotas(c.namespace).List(ctx, metav1.ListOptions{})
}
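// getTLSSecrets lists secrets of type kubernetes.io/tls. The selector is
// assembled with a label-selector helper but passed as a field selector,
// yielding "type=kubernetes.io/tls".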
func (c *client) getTLSSecrets(ctx context.Context) (*corev1.SecretList, error) {
ctx, cancel := context.WithTimeout(ctx, c.timeout)
defer cancel()
labelSelector := metav1.LabelSelector{MatchLabels: map[string]string{"type": "kubernetes.io/tls"}}
return c.CoreV1().Secrets(c.namespace).List(ctx, metav1.ListOptions{
FieldSelector: labels.Set(labelSelector.MatchLabels).String(),
})
}


@@ -0,0 +1,26 @@
package kube_inventory
import (
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf/plugins/common/tls"
)
type mockHandler struct {
responseMap map[string]interface{}
}
func toPtr[T any](v T) *T {
return &v
}
func TestNewClient(t *testing.T) {
_, err := newClient("https://127.0.0.1:443/", "default", "", "abc123", time.Second, tls.ClientConfig{})
require.NoErrorf(t, err, "Failed to create new client: %v", err)
_, err = newClient("https://127.0.0.1:443/", "default", "nonexistantFile", "", time.Second, tls.ClientConfig{})
require.Errorf(t, err, "Failed to read token file \"file\": open file: no such file or directory: %v", err)
}


@@ -0,0 +1,49 @@
package kube_inventory
import (
"context"
apps "k8s.io/api/apps/v1"
"github.com/influxdata/telegraf"
)
func collectDaemonSets(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) {
list, err := ki.client.getDaemonSets(ctx)
if err != nil {
acc.AddError(err)
return
}
for i := range list.Items {
ki.gatherDaemonSet(&list.Items[i], acc)
}
}
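// gatherDaemonSet emits one kubernetes_daemonset metric per daemon set, tagged
// with its name, namespace and any selector labels passing the selector filter;
// "created" is only set when a creation timestamp exists.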
func (ki *KubernetesInventory) gatherDaemonSet(d *apps.DaemonSet, acc telegraf.Accumulator) {
fields := map[string]interface{}{
"generation": d.Generation,
"current_number_scheduled": d.Status.CurrentNumberScheduled,
"desired_number_scheduled": d.Status.DesiredNumberScheduled,
"number_available": d.Status.NumberAvailable,
"number_misscheduled": d.Status.NumberMisscheduled,
"number_ready": d.Status.NumberReady,
"number_unavailable": d.Status.NumberUnavailable,
"updated_number_scheduled": d.Status.UpdatedNumberScheduled,
}
tags := map[string]string{
"daemonset_name": d.Name,
"namespace": d.Namespace,
}
for key, val := range d.Spec.Selector.MatchLabels {
if ki.selectorFilter.Match(key) {
tags["selector_"+key] = val
}
}
creationTS := d.GetCreationTimestamp()
if !creationTS.IsZero() {
fields["created"] = d.GetCreationTimestamp().UnixNano()
}
acc.AddFields(daemonSetMeasurement, fields, tags)
}


@@ -0,0 +1,279 @@
package kube_inventory
import (
"strings"
"testing"
"time"
"github.com/stretchr/testify/require"
apps "k8s.io/api/apps/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
)
func TestDaemonSet(t *testing.T) {
cli := &client{}
now := time.Now()
now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location())
tests := []struct {
name string
handler *mockHandler
output []telegraf.Metric
hasError bool
}{
{
name: "no daemon set",
handler: &mockHandler{
responseMap: map[string]interface{}{
"/daemonsets/": &apps.DaemonSetList{},
},
},
hasError: false,
},
{
name: "collect daemonsets",
handler: &mockHandler{
responseMap: map[string]interface{}{
"/daemonsets/": &apps.DaemonSetList{
Items: []apps.DaemonSet{
{
Status: apps.DaemonSetStatus{
CurrentNumberScheduled: 3,
DesiredNumberScheduled: 5,
NumberAvailable: 2,
NumberMisscheduled: 2,
NumberReady: 1,
NumberUnavailable: 1,
UpdatedNumberScheduled: 2,
},
ObjectMeta: meta.ObjectMeta{
Generation: 11221,
Namespace: "ns1",
Name: "daemon1",
Labels: map[string]string{
"lab1": "v1",
"lab2": "v2",
},
CreationTimestamp: meta.Time{Time: now},
},
Spec: apps.DaemonSetSpec{
Selector: &meta.LabelSelector{
MatchLabels: map[string]string{
"select1": "s1",
"select2": "s2",
},
},
},
},
},
},
},
},
output: []telegraf.Metric{
testutil.MustMetric(
"kubernetes_daemonset",
map[string]string{
"daemonset_name": "daemon1",
"namespace": "ns1",
"selector_select1": "s1",
"selector_select2": "s2",
},
map[string]interface{}{
"generation": int64(11221),
"current_number_scheduled": int32(3),
"desired_number_scheduled": int32(5),
"number_available": int32(2),
"number_misscheduled": int32(2),
"number_ready": int32(1),
"number_unavailable": int32(1),
"updated_number_scheduled": int32(2),
"created": now.UnixNano(),
},
time.Unix(0, 0),
),
},
hasError: false,
},
}
for _, v := range tests {
ks := &KubernetesInventory{
client: cli,
}
require.NoError(t, ks.createSelectorFilters())
acc := new(testutil.Accumulator)
items := ((v.handler.responseMap["/daemonsets/"]).(*apps.DaemonSetList)).Items
for i := range items {
ks.gatherDaemonSet(&items[i], acc)
}
err := acc.FirstError()
if v.hasError {
require.Errorf(t, err, "%s failed, should have error", v.name)
continue
}
// No error case
require.NoErrorf(t, err, "%s failed, err: %v", v.name, err)
require.Len(t, acc.Metrics, len(v.output))
testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime())
}
}
func TestDaemonSetSelectorFilter(t *testing.T) {
cli := &client{}
responseMap := map[string]interface{}{
"/daemonsets/": &apps.DaemonSetList{
Items: []apps.DaemonSet{
{
Status: apps.DaemonSetStatus{
CurrentNumberScheduled: 3,
DesiredNumberScheduled: 5,
NumberAvailable: 2,
NumberMisscheduled: 2,
NumberReady: 1,
NumberUnavailable: 1,
UpdatedNumberScheduled: 2,
},
ObjectMeta: meta.ObjectMeta{
Generation: 11221,
Namespace: "ns1",
Name: "daemon1",
Labels: map[string]string{
"lab1": "v1",
"lab2": "v2",
},
CreationTimestamp: meta.Time{Time: time.Now()},
},
Spec: apps.DaemonSetSpec{
Selector: &meta.LabelSelector{
MatchLabels: map[string]string{
"select1": "s1",
"select2": "s2",
},
},
},
},
},
},
}
tests := []struct {
name string
handler *mockHandler
hasError bool
include []string
exclude []string
expected map[string]string
}{
{
name: "nil filters equals all selectors",
handler: &mockHandler{
responseMap: responseMap,
},
hasError: false,
include: nil,
exclude: nil,
expected: map[string]string{
"selector_select1": "s1",
"selector_select2": "s2",
},
},
{
name: "empty filters equals all selectors",
handler: &mockHandler{
responseMap: responseMap,
},
hasError: false,
expected: map[string]string{
"selector_select1": "s1",
"selector_select2": "s2",
},
},
{
name: "include filter equals only include-matched selectors",
handler: &mockHandler{
responseMap: responseMap,
},
hasError: false,
include: []string{"select1"},
expected: map[string]string{
"selector_select1": "s1",
},
},
{
name: "exclude filter equals only non-excluded selectors (overrides include filter)",
handler: &mockHandler{
responseMap: responseMap,
},
hasError: false,
exclude: []string{"select2"},
expected: map[string]string{
"selector_select1": "s1",
},
},
{
name: "include glob filter equals only include-matched selectors",
handler: &mockHandler{
responseMap: responseMap,
},
hasError: false,
include: []string{"*1"},
expected: map[string]string{
"selector_select1": "s1",
},
},
{
name: "exclude glob filter equals only non-excluded selectors",
handler: &mockHandler{
responseMap: responseMap,
},
hasError: false,
exclude: []string{"*2"},
expected: map[string]string{
"selector_select1": "s1",
},
},
}
for _, v := range tests {
ks := &KubernetesInventory{
client: cli,
}
ks.SelectorInclude = v.include
ks.SelectorExclude = v.exclude
require.NoError(t, ks.createSelectorFilters())
acc := new(testutil.Accumulator)
items := ((v.handler.responseMap["/daemonsets/"]).(*apps.DaemonSetList)).Items
for i := range items {
ks.gatherDaemonSet(&items[i], acc)
}
// Grab selector tags
actual := map[string]string{}
for _, metric := range acc.Metrics {
for key, val := range metric.Tags {
if strings.Contains(key, "selector_") {
actual[key] = val
}
}
}
require.Equalf(t, v.expected, actual,
"actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected)
}
}


@@ -0,0 +1,39 @@
package kube_inventory
import (
"context"
"k8s.io/api/apps/v1"
"github.com/influxdata/telegraf"
)
func collectDeployments(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) {
list, err := ki.client.getDeployments(ctx)
if err != nil {
acc.AddError(err)
return
}
for i := range list.Items {
ki.gatherDeployment(&list.Items[i], acc)
}
}
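// gatherDeployment emits one kubernetes_deployment metric with the available
// and unavailable replica counts and the creation time, tagged with the
// deployment name, namespace and filtered selector labels.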
func (ki *KubernetesInventory) gatherDeployment(d *v1.Deployment, acc telegraf.Accumulator) {
fields := map[string]interface{}{
"replicas_available": d.Status.AvailableReplicas,
"replicas_unavailable": d.Status.UnavailableReplicas,
"created": d.GetCreationTimestamp().UnixNano(),
}
tags := map[string]string{
"deployment_name": d.Name,
"namespace": d.Namespace,
}
for key, val := range d.Spec.Selector.MatchLabels {
if ki.selectorFilter.Match(key) {
tags["selector_"+key] = val
}
}
acc.AddFields(deploymentMeasurement, fields, tags)
}


@@ -0,0 +1,295 @@
package kube_inventory
import (
"strings"
"testing"
"time"
"github.com/stretchr/testify/require"
"k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
)
func TestDeployment(t *testing.T) {
cli := &client{}
now := time.Now()
now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location())
tests := []struct {
name string
handler *mockHandler
output []telegraf.Metric
hasError bool
}{
{
name: "no deployments",
handler: &mockHandler{
responseMap: map[string]interface{}{
"/deployments/": &v1.DeploymentList{},
},
},
hasError: false,
},
{
name: "collect deployments",
handler: &mockHandler{
responseMap: map[string]interface{}{
"/deployments/": &v1.DeploymentList{
Items: []v1.Deployment{
{
Status: v1.DeploymentStatus{
Replicas: 3,
AvailableReplicas: 1,
UnavailableReplicas: 4,
UpdatedReplicas: 2,
ObservedGeneration: 9121,
},
Spec: v1.DeploymentSpec{
Strategy: v1.DeploymentStrategy{
RollingUpdate: &v1.RollingUpdateDeployment{
MaxUnavailable: &intstr.IntOrString{
IntVal: 30,
},
MaxSurge: &intstr.IntOrString{
IntVal: 20,
},
},
},
Replicas: toPtr(int32(4)),
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"select1": "s1",
"select2": "s2",
},
},
},
ObjectMeta: metav1.ObjectMeta{
Generation: 11221,
Namespace: "ns1",
Name: "deploy1",
Labels: map[string]string{
"lab1": "v1",
"lab2": "v2",
},
CreationTimestamp: metav1.Time{Time: now},
},
},
},
},
},
},
output: []telegraf.Metric{
testutil.MustMetric(
"kubernetes_deployment",
map[string]string{
"namespace": "ns1",
"deployment_name": "deploy1",
"selector_select1": "s1",
"selector_select2": "s2",
},
map[string]interface{}{
"replicas_available": int32(1),
"replicas_unavailable": int32(4),
"created": now.UnixNano(),
},
time.Unix(0, 0),
),
},
hasError: false,
},
}
for _, v := range tests {
ks := &KubernetesInventory{
client: cli,
}
require.NoError(t, ks.createSelectorFilters())
acc := new(testutil.Accumulator)
items := ((v.handler.responseMap["/deployments/"]).(*v1.DeploymentList)).Items
for i := range items {
ks.gatherDeployment(&items[i], acc)
}
err := acc.FirstError()
if v.hasError {
require.Errorf(t, err, "%s failed, should have error", v.name)
continue
}
// No error case
require.NoErrorf(t, err, "%s failed, err: %v", v.name, err)
require.Len(t, acc.Metrics, len(v.output))
testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime())
}
}
func TestDeploymentSelectorFilter(t *testing.T) {
cli := &client{}
now := time.Now()
now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location())
responseMap := map[string]interface{}{
"/deployments/": &v1.DeploymentList{
Items: []v1.Deployment{
{
Status: v1.DeploymentStatus{
Replicas: 3,
AvailableReplicas: 1,
UnavailableReplicas: 4,
UpdatedReplicas: 2,
ObservedGeneration: 9121,
},
Spec: v1.DeploymentSpec{
Strategy: v1.DeploymentStrategy{
RollingUpdate: &v1.RollingUpdateDeployment{
MaxUnavailable: &intstr.IntOrString{
IntVal: 30,
},
MaxSurge: &intstr.IntOrString{
IntVal: 20,
},
},
},
Replicas: toPtr(int32(4)),
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"select1": "s1",
"select2": "s2",
},
},
},
ObjectMeta: metav1.ObjectMeta{
Generation: 11221,
Namespace: "ns1",
Name: "deploy1",
Labels: map[string]string{
"lab1": "v1",
"lab2": "v2",
},
CreationTimestamp: metav1.Time{Time: now},
},
},
},
},
}
tests := []struct {
name string
handler *mockHandler
hasError bool
include []string
exclude []string
expected map[string]string
}{
{
name: "nil filters equals all selectors",
handler: &mockHandler{
responseMap: responseMap,
},
hasError: false,
include: nil,
exclude: nil,
expected: map[string]string{
"selector_select1": "s1",
"selector_select2": "s2",
},
},
{
name: "empty filters equals all selectors",
handler: &mockHandler{
responseMap: responseMap,
},
hasError: false,
expected: map[string]string{
"selector_select1": "s1",
"selector_select2": "s2",
},
},
{
name: "include filter equals only include-matched selectors",
handler: &mockHandler{
responseMap: responseMap,
},
hasError: false,
include: []string{"select1"},
expected: map[string]string{
"selector_select1": "s1",
},
},
{
name: "exclude filter equals only non-excluded selectors (overrides include filter)",
handler: &mockHandler{
responseMap: responseMap,
},
hasError: false,
exclude: []string{"select2"},
expected: map[string]string{
"selector_select1": "s1",
},
},
{
name: "include glob filter equals only include-matched selectors",
handler: &mockHandler{
responseMap: responseMap,
},
hasError: false,
include: []string{"*1"},
expected: map[string]string{
"selector_select1": "s1",
},
},
{
name: "exclude glob filter equals only non-excluded selectors",
handler: &mockHandler{
responseMap: responseMap,
},
hasError: false,
exclude: []string{"*2"},
expected: map[string]string{
"selector_select1": "s1",
},
},
}
for _, v := range tests {
ks := &KubernetesInventory{
client: cli,
}
ks.SelectorInclude = v.include
ks.SelectorExclude = v.exclude
require.NoError(t, ks.createSelectorFilters())
acc := new(testutil.Accumulator)
items := ((v.handler.responseMap["/deployments/"]).(*v1.DeploymentList)).Items
for i := range items {
ks.gatherDeployment(&items[i], acc)
}
// Grab selector tags
actual := map[string]string{}
for _, metric := range acc.Metrics {
for key, val := range metric.Tags {
if strings.Contains(key, "selector_") {
actual[key] = val
}
}
}
require.Equalf(t, v.expected, actual,
"actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected)
}
}


@@ -0,0 +1,63 @@
package kube_inventory
import (
"context"
"strings"
discoveryv1 "k8s.io/api/discovery/v1"
"github.com/influxdata/telegraf"
)
func collectEndpoints(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) {
list, err := ki.client.getEndpoints(ctx)
if err != nil {
acc.AddError(err)
return
}
for _, i := range list.Items {
gatherEndpoint(i, acc)
}
}
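// gatherEndpoint emits one kubernetes_endpoint metric per endpoint/port
// combination of an EndpointSlice; slices without a creation timestamp are
// skipped.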
func gatherEndpoint(e discoveryv1.EndpointSlice, acc telegraf.Accumulator) {
creationTS := e.GetCreationTimestamp()
if creationTS.IsZero() {
return
}
fields := map[string]interface{}{
"created": e.GetCreationTimestamp().UnixNano(),
"generation": e.Generation,
}
tags := map[string]string{
"endpoint_name": e.Name,
"namespace": e.Namespace,
}
	for _, endpoint := range e.Endpoints {
		// Conditions.Ready is a *bool and may be nil in the API response.
		if endpoint.Conditions.Ready != nil {
			fields["ready"] = *endpoint.Conditions.Ready
		}
		if endpoint.Hostname != nil {
			tags["hostname"] = *endpoint.Hostname
		}
		if endpoint.NodeName != nil {
			tags["node_name"] = *endpoint.NodeName
		}
		if endpoint.TargetRef != nil {
			tags[strings.ToLower(endpoint.TargetRef.Kind)] = endpoint.TargetRef.Name
		}
		for _, port := range e.Ports {
			if port.Port != nil {
				fields["port"] = *port.Port
			}
			// Name and Protocol are optional pointers as well; guard before dereferencing.
			if port.Name != nil {
				tags["port_name"] = *port.Name
			}
			if port.Protocol != nil {
				tags["port_protocol"] = string(*port.Protocol)
			}
			acc.AddFields(endpointMeasurement, fields, tags)
		}
}
}


@@ -0,0 +1,264 @@
package kube_inventory
import (
"testing"
"time"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
discoveryv1 "k8s.io/api/discovery/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
)
func TestEndpoint(t *testing.T) {
now := time.Now()
now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location())
tests := []struct {
name string
handler *mockHandler
output []telegraf.Metric
hasError bool
}{
{
name: "no endpoints",
handler: &mockHandler{
responseMap: map[string]interface{}{
"/endpoints/": &discoveryv1.EndpointSliceList{},
},
},
hasError: false,
},
{
name: "collect ready endpoints",
handler: &mockHandler{
responseMap: map[string]interface{}{
"/endpoints/": &discoveryv1.EndpointSliceList{
Items: []discoveryv1.EndpointSlice{
{
Endpoints: []discoveryv1.Endpoint{
{
Hostname: toPtr("storage-6"),
NodeName: toPtr("b.storage.internal"),
TargetRef: &corev1.ObjectReference{
Kind: "pod",
Name: "storage-6",
},
Conditions: discoveryv1.EndpointConditions{
Ready: toPtr(true),
},
},
},
Ports: []discoveryv1.EndpointPort{
{
Name: toPtr("server"),
Protocol: toPtr(corev1.Protocol("TCP")),
Port: toPtr(int32(8080)),
},
},
ObjectMeta: metav1.ObjectMeta{
Generation: 12,
Namespace: "ns1",
Name: "storage",
CreationTimestamp: metav1.Time{Time: now},
},
},
},
},
},
},
output: []telegraf.Metric{
testutil.MustMetric(
"kubernetes_endpoint",
map[string]string{
"endpoint_name": "storage",
"namespace": "ns1",
"hostname": "storage-6",
"node_name": "b.storage.internal",
"port_name": "server",
"port_protocol": "TCP",
"pod": "storage-6",
},
map[string]interface{}{
"ready": true,
"port": int32(8080),
"generation": int64(12),
"created": now.UnixNano(),
},
time.Unix(0, 0),
),
},
hasError: false,
},
{
name: "collect notready endpoints",
handler: &mockHandler{
responseMap: map[string]interface{}{
"/endpoints/": &discoveryv1.EndpointSliceList{
Items: []discoveryv1.EndpointSlice{
{
Endpoints: []discoveryv1.Endpoint{
{
Hostname: toPtr("storage-6"),
NodeName: toPtr("b.storage.internal"),
TargetRef: &corev1.ObjectReference{
Kind: "pod",
Name: "storage-6",
},
Conditions: discoveryv1.EndpointConditions{
Ready: toPtr(false),
},
},
},
ObjectMeta: metav1.ObjectMeta{
Generation: 12,
Namespace: "ns1",
Name: "storage",
CreationTimestamp: metav1.Time{Time: now},
},
Ports: []discoveryv1.EndpointPort{
{
Name: toPtr("server"),
Protocol: toPtr(corev1.Protocol("TCP")),
Port: toPtr(int32(8080)),
},
},
},
},
},
},
},
output: []telegraf.Metric{
testutil.MustMetric(
"kubernetes_endpoint",
map[string]string{
"endpoint_name": "storage",
"namespace": "ns1",
"hostname": "storage-6",
"node_name": "b.storage.internal",
"port_name": "server",
"port_protocol": "TCP",
"pod": "storage-6",
},
map[string]interface{}{
"ready": false,
"port": int32(8080),
"generation": int64(12),
"created": now.UnixNano(),
},
time.Unix(0, 0),
),
},
hasError: false,
},
{
name: "endpoints missing node_name",
handler: &mockHandler{
responseMap: map[string]interface{}{
"/endpoints/": &discoveryv1.EndpointSliceList{
Items: []discoveryv1.EndpointSlice{
{
Endpoints: []discoveryv1.Endpoint{
{
Hostname: toPtr("storage-6"),
TargetRef: &corev1.ObjectReference{
Kind: "pod",
Name: "storage-6",
},
Conditions: discoveryv1.EndpointConditions{
Ready: toPtr(false),
},
},
{
Hostname: toPtr("storage-12"),
TargetRef: &corev1.ObjectReference{
Kind: "pod",
Name: "storage-12",
},
Conditions: discoveryv1.EndpointConditions{
Ready: toPtr(true),
},
},
},
ObjectMeta: metav1.ObjectMeta{
Generation: 12,
Namespace: "ns1",
Name: "storage",
CreationTimestamp: metav1.Time{Time: now},
},
Ports: []discoveryv1.EndpointPort{
{
Name: toPtr("server"),
Protocol: toPtr(corev1.Protocol("TCP")),
Port: toPtr(int32(8080)),
},
},
},
},
},
},
},
output: []telegraf.Metric{
testutil.MustMetric(
"kubernetes_endpoint",
map[string]string{
"endpoint_name": "storage",
"namespace": "ns1",
"hostname": "storage-6",
"port_name": "server",
"port_protocol": "TCP",
"pod": "storage-6",
},
map[string]interface{}{
"ready": false,
"port": int32(8080),
"generation": int64(12),
"created": now.UnixNano(),
},
time.Unix(0, 0),
),
testutil.MustMetric(
"kubernetes_endpoint",
map[string]string{
"endpoint_name": "storage",
"namespace": "ns1",
"hostname": "storage-12",
"port_name": "server",
"port_protocol": "TCP",
"pod": "storage-12",
},
map[string]interface{}{
"ready": true,
"port": int32(8080),
"generation": int64(12),
"created": now.UnixNano(),
},
time.Unix(0, 0),
),
},
hasError: false,
},
}
for _, v := range tests {
acc := new(testutil.Accumulator)
for _, endpoint := range ((v.handler.responseMap["/endpoints/"]).(*discoveryv1.EndpointSliceList)).Items {
gatherEndpoint(endpoint, acc)
}
err := acc.FirstError()
if v.hasError {
require.Errorf(t, err, "%s failed, should have error", v.name)
continue
}
// No error case
require.NoErrorf(t, err, "%s failed, err: %v", v.name, err)
require.Len(t, acc.Metrics, len(v.output))
testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime())
}
}


@@ -0,0 +1,61 @@
package kube_inventory
import (
"context"
netv1 "k8s.io/api/networking/v1"
"github.com/influxdata/telegraf"
)
func collectIngress(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) {
list, err := ki.client.getIngress(ctx)
if err != nil {
acc.AddError(err)
return
}
for _, i := range list.Items {
gatherIngress(i, acc)
}
}
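// gatherIngress emits one kubernetes_ingress metric per load-balancer ingress,
// rule and HTTP path combination; rules without an HTTP section and ingresses
// without a creation timestamp are skipped.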
func gatherIngress(i netv1.Ingress, acc telegraf.Accumulator) {
creationTS := i.GetCreationTimestamp()
if creationTS.IsZero() {
return
}
fields := map[string]interface{}{
"created": i.GetCreationTimestamp().UnixNano(),
"generation": i.Generation,
}
tags := map[string]string{
"ingress_name": i.Name,
"namespace": i.Namespace,
}
for _, ingress := range i.Status.LoadBalancer.Ingress {
tags["hostname"] = ingress.Hostname
tags["ip"] = ingress.IP
for _, rule := range i.Spec.Rules {
if rule.IngressRuleValue.HTTP == nil {
continue
}
for _, path := range rule.IngressRuleValue.HTTP.Paths {
if path.Backend.Service != nil {
tags["backend_service_name"] = path.Backend.Service.Name
fields["backend_service_port"] = path.Backend.Service.Port.Number
}
fields["tls"] = i.Spec.TLS != nil
tags["path"] = path.Path
tags["host"] = rule.Host
acc.AddFields(ingressMeasurement, fields, tags)
}
}
}
}


@@ -0,0 +1,237 @@
package kube_inventory
import (
"testing"
"time"
"github.com/stretchr/testify/require"
netv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
)
func TestIngress(t *testing.T) {
now := time.Now()
now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location())
tests := []struct {
name string
handler *mockHandler
output []telegraf.Metric
hasError bool
}{
{
name: "no ingress",
handler: &mockHandler{
responseMap: map[string]interface{}{
"/ingress/": netv1.IngressList{},
},
},
hasError: false,
},
{
name: "collect ingress",
handler: &mockHandler{
responseMap: map[string]interface{}{
"/ingress/": netv1.IngressList{
Items: []netv1.Ingress{
{
Status: netv1.IngressStatus{
LoadBalancer: netv1.IngressLoadBalancerStatus{
Ingress: []netv1.IngressLoadBalancerIngress{
{
Hostname: "chron-1",
IP: "1.0.0.127",
},
},
},
},
Spec: netv1.IngressSpec{
Rules: []netv1.IngressRule{
{
Host: "ui.internal",
IngressRuleValue: netv1.IngressRuleValue{
HTTP: &netv1.HTTPIngressRuleValue{
Paths: []netv1.HTTPIngressPath{
{
Path: "/",
Backend: netv1.IngressBackend{
Service: &netv1.IngressServiceBackend{
Name: "chronografd",
Port: netv1.ServiceBackendPort{
Number: 8080,
},
},
},
},
},
},
},
},
},
},
ObjectMeta: metav1.ObjectMeta{
Generation: 12,
Namespace: "ns1",
Name: "ui-lb",
CreationTimestamp: metav1.Time{Time: now},
},
},
},
},
},
},
output: []telegraf.Metric{
testutil.MustMetric(
"kubernetes_ingress",
map[string]string{
"ingress_name": "ui-lb",
"namespace": "ns1",
"ip": "1.0.0.127",
"hostname": "chron-1",
"backend_service_name": "chronografd",
"host": "ui.internal",
"path": "/",
},
map[string]interface{}{
"tls": false,
"backend_service_port": int32(8080),
"generation": int64(12),
"created": now.UnixNano(),
},
time.Unix(0, 0),
),
},
hasError: false,
},
{
name: "no HTTPIngressRuleValue",
handler: &mockHandler{
responseMap: map[string]interface{}{
"/ingress/": netv1.IngressList{
Items: []netv1.Ingress{
{
Status: netv1.IngressStatus{
LoadBalancer: netv1.IngressLoadBalancerStatus{
Ingress: []netv1.IngressLoadBalancerIngress{
{
Hostname: "chron-1",
IP: "1.0.0.127",
},
},
},
},
Spec: netv1.IngressSpec{
Rules: []netv1.IngressRule{
{
Host: "ui.internal",
IngressRuleValue: netv1.IngressRuleValue{
HTTP: nil,
},
},
},
},
ObjectMeta: metav1.ObjectMeta{
Generation: 12,
Namespace: "ns1",
Name: "ui-lb",
CreationTimestamp: metav1.Time{Time: now},
},
},
},
},
},
},
hasError: false,
},
{
name: "no IngressServiceBackend",
handler: &mockHandler{
responseMap: map[string]interface{}{
"/ingress/": netv1.IngressList{
Items: []netv1.Ingress{
{
Status: netv1.IngressStatus{
LoadBalancer: netv1.IngressLoadBalancerStatus{
Ingress: []netv1.IngressLoadBalancerIngress{
{
Hostname: "chron-1",
IP: "1.0.0.127",
},
},
},
},
Spec: netv1.IngressSpec{
Rules: []netv1.IngressRule{
{
Host: "ui.internal",
IngressRuleValue: netv1.IngressRuleValue{
HTTP: &netv1.HTTPIngressRuleValue{
Paths: []netv1.HTTPIngressPath{
{
Path: "/",
Backend: netv1.IngressBackend{
Service: nil,
},
},
},
},
},
},
},
},
ObjectMeta: metav1.ObjectMeta{
Generation: 12,
Namespace: "ns1",
Name: "ui-lb",
CreationTimestamp: metav1.Time{Time: now},
},
},
},
},
},
},
output: []telegraf.Metric{
testutil.MustMetric(
"kubernetes_ingress",
map[string]string{
"ingress_name": "ui-lb",
"namespace": "ns1",
"ip": "1.0.0.127",
"hostname": "chron-1",
"host": "ui.internal",
"path": "/",
},
map[string]interface{}{
"tls": false,
"generation": int64(12),
"created": now.UnixNano(),
},
time.Unix(0, 0),
),
},
hasError: false,
},
}
for _, v := range tests {
acc := new(testutil.Accumulator)
for _, ingress := range ((v.handler.responseMap["/ingress/"]).(netv1.IngressList)).Items {
gatherIngress(ingress, acc)
}
err := acc.FirstError()
if v.hasError {
require.Errorf(t, err, "%s failed, should have error", v.name)
continue
}
// No error case
require.NoErrorf(t, err, "%s failed, err: %v", v.name, err)
require.Len(t, acc.Metrics, len(v.output))
testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime())
}
}


@@ -0,0 +1,211 @@
//go:generate ../../../tools/readme_config_includer/generator
package kube_inventory
import (
"context"
_ "embed"
"encoding/json"
"fmt"
"net/http"
"strconv"
"sync"
"time"
"k8s.io/apimachinery/pkg/api/resource"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/filter"
"github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
//go:embed sample.conf
var sampleConfig string
var availableCollectors = map[string]func(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory){
"daemonsets": collectDaemonSets,
"deployments": collectDeployments,
"endpoints": collectEndpoints,
"ingress": collectIngress,
"nodes": collectNodes,
"pods": collectPods,
"services": collectServices,
"statefulsets": collectStatefulSets,
"persistentvolumes": collectPersistentVolumes,
"persistentvolumeclaims": collectPersistentVolumeClaims,
"resourcequotas": collectResourceQuotas,
"secrets": collectSecrets,
}
const (
daemonSetMeasurement = "kubernetes_daemonset"
deploymentMeasurement = "kubernetes_deployment"
endpointMeasurement = "kubernetes_endpoint"
ingressMeasurement = "kubernetes_ingress"
nodeMeasurement = "kubernetes_node"
persistentVolumeMeasurement = "kubernetes_persistentvolume"
persistentVolumeClaimMeasurement = "kubernetes_persistentvolumeclaim"
podContainerMeasurement = "kubernetes_pod_container"
serviceMeasurement = "kubernetes_service"
statefulSetMeasurement = "kubernetes_statefulset"
resourcequotaMeasurement = "kubernetes_resourcequota"
certificateMeasurement = "kubernetes_certificate"
defaultServiceAccountPath = "/var/run/secrets/kubernetes.io/serviceaccount/token"
)
type KubernetesInventory struct {
URL string `toml:"url"`
KubeletURL string `toml:"url_kubelet"`
BearerToken string `toml:"bearer_token"`
BearerTokenString string `toml:"bearer_token_string" deprecated:"1.24.0;1.35.0;use 'BearerToken' with a file instead"`
Namespace string `toml:"namespace"`
ResponseTimeout config.Duration `toml:"response_timeout"` // Timeout specified as a string - 3s, 1m, 1h
ResourceExclude []string `toml:"resource_exclude"`
ResourceInclude []string `toml:"resource_include"`
MaxConfigMapAge config.Duration `toml:"max_config_map_age"`
SelectorInclude []string `toml:"selector_include"`
SelectorExclude []string `toml:"selector_exclude"`
NodeName string `toml:"node_name"`
Log telegraf.Logger `toml:"-"`
tls.ClientConfig
client *client
httpClient *http.Client
selectorFilter filter.Filter
}
func (*KubernetesInventory) SampleConfig() string {
return sampleConfig
}
func (ki *KubernetesInventory) Init() error {
// If neither are provided, use the default service account.
if ki.BearerToken == "" && ki.BearerTokenString == "" {
ki.BearerToken = defaultServiceAccountPath
}
if ki.BearerTokenString != "" {
ki.Log.Warn("Telegraf cannot auto-refresh a bearer token string, use BearerToken file instead")
}
var err error
ki.client, err = newClient(ki.URL, ki.Namespace, ki.BearerToken, ki.BearerTokenString, time.Duration(ki.ResponseTimeout), ki.ClientConfig)
if err != nil {
return err
}
if ki.ResponseTimeout < config.Duration(time.Second) {
ki.ResponseTimeout = config.Duration(time.Second * 5)
}
// Only create an http client if we have a kubelet url
if ki.KubeletURL != "" {
ki.httpClient, err = newHTTPClient(ki.ClientConfig, ki.BearerToken, ki.ResponseTimeout)
if err != nil {
ki.Log.Warnf("unable to create http client: %v", err)
}
}
return nil
}
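// Gather rebuilds the resource and selector filters on every interval and runs
// all collectors selected by the resource filter concurrently against the
// shared accumulator, waiting for them to finish before returning.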
func (ki *KubernetesInventory) Gather(acc telegraf.Accumulator) (err error) {
resourceFilter, err := filter.NewIncludeExcludeFilter(ki.ResourceInclude, ki.ResourceExclude)
if err != nil {
return err
}
ki.selectorFilter, err = filter.NewIncludeExcludeFilter(ki.SelectorInclude, ki.SelectorExclude)
if err != nil {
return err
}
wg := sync.WaitGroup{}
ctx := context.Background()
for collector, f := range availableCollectors {
if resourceFilter.Match(collector) {
wg.Add(1)
go func(f func(ctx context.Context, acc telegraf.Accumulator, k *KubernetesInventory)) {
defer wg.Done()
f(ctx, acc, ki)
}(f)
}
}
wg.Wait()
return nil
}
func atoi(s string) int64 {
i, err := strconv.ParseInt(s, 10, 64)
if err != nil {
return 0
}
return i
}
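// convertQuantity parses a Kubernetes quantity string (e.g. "125817904Ki" or
// "1000m") and returns it scaled by the multiplier m; multipliers below 1 are
// treated as 1, and parse failures are logged and reported as 0.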
func (ki *KubernetesInventory) convertQuantity(s string, m float64) int64 {
q, err := resource.ParseQuantity(s)
if err != nil {
ki.Log.Debugf("failed to parse quantity: %s", err.Error())
return 0
}
f, err := strconv.ParseFloat(fmt.Sprint(q.AsDec()), 64)
if err != nil {
ki.Log.Debugf("failed to parse float: %s", err.Error())
return 0
}
if m < 1 {
m = 1
}
return int64(f * m)
}
func (ki *KubernetesInventory) queryPodsFromKubelet(url string, v interface{}) error {
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return fmt.Errorf("creating new http request for url %s failed: %w", url, err)
}
req.Header.Add("Accept", "application/json")
resp, err := ki.httpClient.Do(req)
if err != nil {
return fmt.Errorf("error making HTTP request to %q: %w", url, err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("%s returned HTTP status %s", url, resp.Status)
}
if err := json.NewDecoder(resp.Body).Decode(v); err != nil {
return fmt.Errorf("error parsing response: %w", err)
}
return nil
}
func (ki *KubernetesInventory) createSelectorFilters() error {
selectorFilter, err := filter.NewIncludeExcludeFilter(ki.SelectorInclude, ki.SelectorExclude)
if err != nil {
return err
}
ki.selectorFilter = selectorFilter
return nil
}
func init() {
inputs.Add("kube_inventory", func() telegraf.Input {
return &KubernetesInventory{
ResponseTimeout: config.Duration(time.Second * 5),
Namespace: "default",
SelectorInclude: make([]string, 0),
SelectorExclude: []string{"*"},
}
})
}


@@ -0,0 +1,99 @@
package kube_inventory
import (
"context"
corev1 "k8s.io/api/core/v1"
"github.com/influxdata/telegraf"
)
func collectNodes(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) {
list, err := ki.client.getNodes(ctx, ki.NodeName)
if err != nil {
acc.AddError(err)
return
}
gatherNodeCount(len(list.Items), acc)
for i := range list.Items {
ki.gatherNode(&list.Items[i], acc)
}
}
func gatherNodeCount(count int, acc telegraf.Accumulator) {
fields := map[string]interface{}{"node_count": count}
tags := make(map[string]string)
acc.AddFields(nodeMeasurement, fields, tags)
}
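// gatherNode emits one kubernetes_node metric per node condition, encoding the
// condition status as status_condition (False=0, True=1, Unknown=2) and the
// Ready condition as ready (0/1), followed by a summary metric with capacity,
// allocatable and schedulability fields.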
func (ki *KubernetesInventory) gatherNode(n *corev1.Node, acc telegraf.Accumulator) {
fields := make(map[string]interface{}, len(n.Status.Capacity)+len(n.Status.Allocatable)+1)
tags := map[string]string{
"node_name": n.Name,
"cluster_namespace": n.Annotations["cluster.x-k8s.io/cluster-namespace"],
"version": n.Status.NodeInfo.KubeletVersion,
}
for resourceName, val := range n.Status.Capacity {
switch resourceName {
case "cpu":
fields["capacity_cpu_cores"] = ki.convertQuantity(val.String(), 1)
fields["capacity_millicpu_cores"] = ki.convertQuantity(val.String(), 1000)
case "memory":
fields["capacity_memory_bytes"] = ki.convertQuantity(val.String(), 1)
case "pods":
fields["capacity_pods"] = atoi(val.String())
}
}
for resourceName, val := range n.Status.Allocatable {
switch resourceName {
case "cpu":
fields["allocatable_cpu_cores"] = ki.convertQuantity(val.String(), 1)
fields["allocatable_millicpu_cores"] = ki.convertQuantity(val.String(), 1000)
case "memory":
fields["allocatable_memory_bytes"] = ki.convertQuantity(val.String(), 1)
case "pods":
fields["allocatable_pods"] = atoi(val.String())
}
}
for _, val := range n.Status.Conditions {
conditiontags := map[string]string{
"status": string(val.Status),
"condition": string(val.Type),
}
for k, v := range tags {
conditiontags[k] = v
}
running := 0
nodeready := 0
if val.Status == "True" {
if val.Type == "Ready" {
nodeready = 1
}
running = 1
} else if val.Status == "Unknown" {
if val.Type == "Ready" {
nodeready = 0
}
running = 2
}
conditionfields := map[string]interface{}{
"status_condition": running,
"ready": nodeready,
}
acc.AddFields(nodeMeasurement, conditionfields, conditiontags)
}
unschedulable := 0
if n.Spec.Unschedulable {
unschedulable = 1
}
fields["spec_unschedulable"] = unschedulable
acc.AddFields(nodeMeasurement, fields, tags)
}


@@ -0,0 +1,181 @@
package kube_inventory
import (
"testing"
"time"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
)
func TestNode(t *testing.T) {
cli := &client{}
now := time.Now()
tests := []struct {
name string
handler *mockHandler
output []telegraf.Metric
hasError bool
}{
{
name: "no nodes",
handler: &mockHandler{
responseMap: map[string]interface{}{
"/nodes/": corev1.NodeList{},
},
},
output: []telegraf.Metric{
testutil.MustMetric(
nodeMeasurement,
map[string]string{},
map[string]interface{}{
"node_count": int64(0),
},
time.Unix(0, 0),
),
},
hasError: false,
},
{
name: "collect nodes",
handler: &mockHandler{
responseMap: map[string]interface{}{
"/nodes/": corev1.NodeList{
Items: []corev1.Node{
{
Status: corev1.NodeStatus{
NodeInfo: corev1.NodeSystemInfo{
KernelVersion: "4.14.48-coreos-r2",
OSImage: "Container Linux by CoreOS 1745.7.0 (Rhyolite)",
ContainerRuntimeVersion: "docker://18.3.1",
KubeletVersion: "v1.10.3",
KubeProxyVersion: "v1.10.3",
},
Phase: "Running",
Capacity: corev1.ResourceList{
"cpu": resource.MustParse("16"),
"ephemeral_storage_bytes": resource.MustParse("49536401408"),
"hugepages_1Gi_bytes": resource.MustParse("0"),
"hugepages_2Mi_bytes": resource.MustParse("0"),
"memory": resource.MustParse("125817904Ki"),
"pods": resource.MustParse("110"),
},
Allocatable: corev1.ResourceList{
"cpu": resource.MustParse("1000m"),
"ephemeral_storage_bytes": resource.MustParse("44582761194"),
"hugepages_1Gi_bytes": resource.MustParse("0"),
"hugepages_2Mi_bytes": resource.MustParse("0"),
"memory": resource.MustParse("125715504Ki"),
"pods": resource.MustParse("110"),
},
Conditions: []corev1.NodeCondition{
{Type: "Ready", Status: "True", LastTransitionTime: metav1.Time{Time: now}},
},
},
Spec: corev1.NodeSpec{
ProviderID: "aws:///us-east-1c/i-0c00",
Taints: []corev1.Taint{
{
Key: "k1",
Value: "v1",
Effect: "NoExecute",
},
{
Key: "k2",
Value: "v2",
Effect: "NoSchedule",
},
},
},
ObjectMeta: metav1.ObjectMeta{
Generation: 11232,
Name: "node1",
Labels: map[string]string{
"lab1": "v1",
"lab2": "v2",
},
Namespace: "ns1",
Annotations: map[string]string{
"cluster.x-k8s.io/cluster-namespace": "ns1",
},
CreationTimestamp: metav1.Time{Time: now},
},
},
},
},
},
},
output: []telegraf.Metric{
testutil.MustMetric(
nodeMeasurement,
map[string]string{
"node_name": "node1",
"cluster_namespace": "ns1",
"condition": "Ready",
"status": "True",
"version": "v1.10.3",
},
map[string]interface{}{
"status_condition": int64(1),
"ready": int64(1),
},
time.Unix(0, 0),
),
testutil.MustMetric(
nodeMeasurement,
map[string]string{
"node_name": "node1",
"cluster_namespace": "ns1",
"version": "v1.10.3",
},
map[string]interface{}{
"capacity_cpu_cores": int64(16),
"capacity_millicpu_cores": int64(16000),
"capacity_memory_bytes": int64(1.28837533696e+11),
"capacity_pods": int64(110),
"allocatable_cpu_cores": int64(1),
"allocatable_millicpu_cores": int64(1000),
"allocatable_memory_bytes": int64(1.28732676096e+11),
"allocatable_pods": int64(110),
"spec_unschedulable": int64(0),
},
time.Unix(0, 0),
),
},
hasError: false,
},
}
for _, v := range tests {
ks := &KubernetesInventory{
client: cli,
}
acc := new(testutil.Accumulator)
items := ((v.handler.responseMap["/nodes/"]).(corev1.NodeList)).Items
for i := range items {
ks.gatherNode(&items[i], acc)
}
err := acc.FirstError()
if v.hasError {
require.Errorf(t, err, "%s failed, should have error", v.name)
continue
}
// No error case
require.NoErrorf(t, err, "%s failed, err: %v", v.name, err)
if v.name == "no nodes" {
nodeCount := len((v.handler.responseMap["/nodes/"]).(corev1.NodeList).Items)
gatherNodeCount(nodeCount, acc)
}
require.Len(t, acc.Metrics, len(v.output))
testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime())
}
}

View file

@@ -0,0 +1,47 @@
package kube_inventory
import (
"context"
"strings"
corev1 "k8s.io/api/core/v1"
"github.com/influxdata/telegraf"
)
func collectPersistentVolumes(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) {
list, err := ki.client.getPersistentVolumes(ctx)
if err != nil {
acc.AddError(err)
return
}
for i := range list.Items {
gatherPersistentVolume(&list.Items[i], acc)
}
}
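// gatherPersistentVolume maps the volume phase onto the numeric phase_type
// field: bound=0, failed=1, pending=2, released=3, available=4; any other
// phase falls back to 5.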
func gatherPersistentVolume(pv *corev1.PersistentVolume, acc telegraf.Accumulator) {
phaseType := 5
switch strings.ToLower(string(pv.Status.Phase)) {
case "bound":
phaseType = 0
case "failed":
phaseType = 1
case "pending":
phaseType = 2
case "released":
phaseType = 3
case "available":
phaseType = 4
}
fields := map[string]interface{}{
"phase_type": phaseType,
}
tags := map[string]string{
"pv_name": pv.Name,
"phase": string(pv.Status.Phase),
"storageclass": pv.Spec.StorageClassName,
}
acc.AddFields(persistentVolumeMeasurement, fields, tags)
}

View file

@@ -0,0 +1,97 @@
package kube_inventory
import (
"testing"
"time"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
)
func TestPersistentVolume(t *testing.T) {
now := time.Now()
now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location())
tests := []struct {
name string
handler *mockHandler
output []telegraf.Metric
hasError bool
}{
{
name: "no pv",
handler: &mockHandler{
responseMap: map[string]interface{}{
"/persistentvolumes/": &corev1.PersistentVolumeList{},
},
},
hasError: false,
},
{
name: "collect pvs",
handler: &mockHandler{
responseMap: map[string]interface{}{
"/persistentvolumes/": &corev1.PersistentVolumeList{
Items: []corev1.PersistentVolume{
{
Status: corev1.PersistentVolumeStatus{
Phase: "pending",
},
Spec: corev1.PersistentVolumeSpec{
StorageClassName: "ebs-1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "pv1",
Labels: map[string]string{
"lab1": "v1",
"lab2": "v2",
},
CreationTimestamp: metav1.Time{Time: now},
},
},
},
},
},
},
output: []telegraf.Metric{
testutil.MustMetric(
"kubernetes_persistentvolume",
map[string]string{
"pv_name": "pv1",
"storageclass": "ebs-1",
"phase": "pending",
},
map[string]interface{}{
"phase_type": 2,
},
time.Unix(0, 0),
),
},
hasError: false,
},
}
for _, v := range tests {
acc := new(testutil.Accumulator)
items := ((v.handler.responseMap["/persistentvolumes/"]).(*corev1.PersistentVolumeList)).Items
for i := range items {
gatherPersistentVolume(&items[i], acc)
}
err := acc.FirstError()
if v.hasError {
require.Errorf(t, err, "%s failed, should have error", v.name)
continue
}
// No error case
require.NoErrorf(t, err, "%s failed, err: %v", v.name, err)
require.Len(t, acc.Metrics, len(v.output))
testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime())
}
}

View file

@@ -0,0 +1,53 @@
package kube_inventory
import (
"context"
"strings"
corev1 "k8s.io/api/core/v1"
"github.com/influxdata/telegraf"
)
func collectPersistentVolumeClaims(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) {
list, err := ki.client.getPersistentVolumeClaims(ctx)
if err != nil {
acc.AddError(err)
return
}
for _, pvc := range list.Items {
ki.gatherPersistentVolumeClaim(pvc, acc)
}
}
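// gatherPersistentVolumeClaim encodes the claim phase as the phase_type
// field: bound=0, lost=1, pending=2; any other phase falls back to 3.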
func (ki *KubernetesInventory) gatherPersistentVolumeClaim(pvc corev1.PersistentVolumeClaim, acc telegraf.Accumulator) {
phaseType := 3
switch strings.ToLower(string(pvc.Status.Phase)) {
case "bound":
phaseType = 0
case "lost":
phaseType = 1
case "pending":
phaseType = 2
}
fields := map[string]interface{}{
"phase_type": phaseType,
}
tags := map[string]string{
"pvc_name": pvc.Name,
"namespace": pvc.Namespace,
"phase": string(pvc.Status.Phase),
}
if pvc.Spec.StorageClassName != nil {
tags["storageclass"] = *pvc.Spec.StorageClassName
}
if pvc.Spec.Selector != nil {
for key, val := range pvc.Spec.Selector.MatchLabels {
if ki.selectorFilter.Match(key) {
tags["selector_"+key] = val
}
}
}
acc.AddFields(persistentVolumeClaimMeasurement, fields, tags)
}

View file

@@ -0,0 +1,362 @@
package kube_inventory
import (
"strings"
"testing"
"time"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
)
func TestPersistentVolumeClaim(t *testing.T) {
cli := &client{}
now := time.Now()
now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location())
tests := []struct {
name string
handler *mockHandler
output []telegraf.Metric
hasError bool
}{
{
name: "no pv claims",
handler: &mockHandler{
responseMap: map[string]interface{}{
"/persistentvolumeclaims/": &corev1.PersistentVolumeClaimList{},
},
},
hasError: false,
},
{
name: "collect pv claims",
handler: &mockHandler{
responseMap: map[string]interface{}{
"/persistentvolumeclaims/": &corev1.PersistentVolumeClaimList{
Items: []corev1.PersistentVolumeClaim{
{
Status: corev1.PersistentVolumeClaimStatus{
Phase: "bound",
},
Spec: corev1.PersistentVolumeClaimSpec{
VolumeName: "pvc-dc870fd6-1e08-11e8-b226-02aa4bc06eb8",
StorageClassName: toPtr("ebs-1"),
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"select1": "s1",
"select2": "s2",
},
},
},
ObjectMeta: metav1.ObjectMeta{
Namespace: "ns1",
Name: "pc1",
Labels: map[string]string{
"lab1": "v1",
"lab2": "v2",
},
CreationTimestamp: metav1.Time{Time: now},
},
},
},
},
},
},
output: []telegraf.Metric{
testutil.MustMetric(
"kubernetes_persistentvolumeclaim",
map[string]string{
"pvc_name": "pc1",
"namespace": "ns1",
"storageclass": "ebs-1",
"phase": "bound",
"selector_select1": "s1",
"selector_select2": "s2",
},
map[string]interface{}{
"phase_type": 0,
},
time.Unix(0, 0),
),
},
hasError: false,
},
{
name: "no label selectors",
hasError: false,
handler: &mockHandler{
responseMap: map[string]interface{}{
"/persistentvolumeclaims/": &corev1.PersistentVolumeClaimList{
Items: []corev1.PersistentVolumeClaim{
{
Status: corev1.PersistentVolumeClaimStatus{
Phase: "bound",
},
Spec: corev1.PersistentVolumeClaimSpec{
VolumeName: "pvc-dc870fd6-1e08-11e8-b226-02aa4bc06eb8",
StorageClassName: toPtr("ebs-1"),
Selector: nil,
},
ObjectMeta: metav1.ObjectMeta{
Namespace: "ns1",
Name: "pc1",
Labels: map[string]string{
"lab1": "v1",
"lab2": "v2",
},
CreationTimestamp: metav1.Time{Time: now},
},
},
},
},
},
},
output: []telegraf.Metric{
testutil.MustMetric(
"kubernetes_persistentvolumeclaim",
map[string]string{
"pvc_name": "pc1",
"namespace": "ns1",
"storageclass": "ebs-1",
"phase": "bound",
},
map[string]interface{}{
"phase_type": 0,
},
time.Unix(0, 0),
),
},
},
{
name: "no storage class name",
handler: &mockHandler{
responseMap: map[string]interface{}{
"/persistentvolumeclaims/": &corev1.PersistentVolumeClaimList{
Items: []corev1.PersistentVolumeClaim{
{
Status: corev1.PersistentVolumeClaimStatus{
Phase: "bound",
},
Spec: corev1.PersistentVolumeClaimSpec{
VolumeName: "pvc-dc870fd6-1e08-11e8-b226-02aa4bc06eb8",
StorageClassName: nil,
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"select1": "s1",
"select2": "s2",
},
},
},
ObjectMeta: metav1.ObjectMeta{
Namespace: "ns1",
Name: "pc1",
Labels: map[string]string{
"lab1": "v1",
"lab2": "v2",
},
CreationTimestamp: metav1.Time{Time: now},
},
},
},
},
},
},
output: []telegraf.Metric{
testutil.MustMetric(
"kubernetes_persistentvolumeclaim",
map[string]string{
"pvc_name": "pc1",
"namespace": "ns1",
"phase": "bound",
"selector_select1": "s1",
"selector_select2": "s2",
},
map[string]interface{}{
"phase_type": 0,
},
time.Unix(0, 0),
),
},
hasError: false,
},
}
for _, v := range tests {
ks := &KubernetesInventory{
client: cli,
}
require.NoError(t, ks.createSelectorFilters())
acc := new(testutil.Accumulator)
for _, pvc := range ((v.handler.responseMap["/persistentvolumeclaims/"]).(*corev1.PersistentVolumeClaimList)).Items {
ks.gatherPersistentVolumeClaim(pvc, acc)
}
err := acc.FirstError()
if v.hasError {
require.Errorf(t, err, "%s failed, should have error", v.name)
continue
}
// No error case
require.NoErrorf(t, err, "%s failed, err: %v", v.name, err)
require.Len(t, acc.Metrics, len(v.output))
testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime())
}
}
func TestPersistentVolumeClaimSelectorFilter(t *testing.T) {
cli := &client{}
now := time.Now()
now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location())
responseMap := map[string]interface{}{
"/persistentvolumeclaims/": &corev1.PersistentVolumeClaimList{
Items: []corev1.PersistentVolumeClaim{
{
Status: corev1.PersistentVolumeClaimStatus{
Phase: "bound",
},
Spec: corev1.PersistentVolumeClaimSpec{
VolumeName: "pvc-dc870fd6-1e08-11e8-b226-02aa4bc06eb8",
StorageClassName: toPtr("ebs-1"),
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"select1": "s1",
"select2": "s2",
},
},
},
ObjectMeta: metav1.ObjectMeta{
Namespace: "ns1",
Name: "pc1",
Labels: map[string]string{
"lab1": "v1",
"lab2": "v2",
},
CreationTimestamp: metav1.Time{Time: now},
},
},
},
},
}
tests := []struct {
name string
handler *mockHandler
hasError bool
include []string
exclude []string
expected map[string]string
}{
{
name: "nil filters equals all selectors",
handler: &mockHandler{
responseMap: responseMap,
},
hasError: false,
include: nil,
exclude: nil,
expected: map[string]string{
"selector_select1": "s1",
"selector_select2": "s2",
},
},
{
name: "empty filters equals all selectors",
handler: &mockHandler{
responseMap: responseMap,
},
hasError: false,
expected: map[string]string{
"selector_select1": "s1",
"selector_select2": "s2",
},
},
{
name: "include filter equals only include-matched selectors",
handler: &mockHandler{
responseMap: responseMap,
},
hasError: false,
include: []string{"select1"},
expected: map[string]string{
"selector_select1": "s1",
},
},
{
name: "exclude filter equals only non-excluded selectors (overrides include filter)",
handler: &mockHandler{
responseMap: responseMap,
},
hasError: false,
exclude: []string{"select2"},
expected: map[string]string{
"selector_select1": "s1",
},
},
{
name: "include glob filter equals only include-matched selectors",
handler: &mockHandler{
responseMap: responseMap,
},
hasError: false,
include: []string{"*1"},
expected: map[string]string{
"selector_select1": "s1",
},
},
{
name: "exclude glob filter equals only non-excluded selectors",
handler: &mockHandler{
responseMap: responseMap,
},
hasError: false,
exclude: []string{"*2"},
expected: map[string]string{
"selector_select1": "s1",
},
},
}
for _, v := range tests {
ks := &KubernetesInventory{
client: cli,
}
ks.SelectorInclude = v.include
ks.SelectorExclude = v.exclude
require.NoError(t, ks.createSelectorFilters())
acc := new(testutil.Accumulator)
for _, pvc := range ((v.handler.responseMap["/persistentvolumeclaims/"]).(*corev1.PersistentVolumeClaimList)).Items {
ks.gatherPersistentVolumeClaim(pvc, acc)
}
// Grab selector tags
actual := map[string]string{}
for _, metric := range acc.Metrics {
for key, val := range metric.Tags {
if strings.Contains(key, "selector_") {
actual[key] = val
}
}
}
require.Equalf(t, v.expected, actual,
"actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected)
}
}

View file

@@ -0,0 +1,169 @@
package kube_inventory
import (
"context"
"strings"
corev1 "k8s.io/api/core/v1"
"github.com/influxdata/telegraf"
)
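// collectPods queries the kubelet directly when url_kubelet is configured
// and otherwise lists pods through the API server client.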
func collectPods(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) {
var list corev1.PodList
listRef := &list
var err error
if ki.KubeletURL != "" {
err = ki.queryPodsFromKubelet(ki.KubeletURL+"/pods", listRef)
} else {
listRef, err = ki.client.getPods(ctx, ki.NodeName)
}
if err != nil {
acc.AddError(err)
return
}
for i := range listRef.Items {
ki.gatherPod(&listRef.Items[i], acc)
}
}
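// gatherPod indexes container statuses by name so each spec container can
// be matched to its status; containers without a reported status yet are
// gathered with a zero-value ContainerStatus.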
func (ki *KubernetesInventory) gatherPod(p *corev1.Pod, acc telegraf.Accumulator) {
creationTS := p.GetCreationTimestamp()
if creationTS.IsZero() {
return
}
containerList := make(map[string]*corev1.ContainerStatus, len(p.Status.ContainerStatuses))
for i := range p.Status.ContainerStatuses {
containerList[p.Status.ContainerStatuses[i].Name] = &p.Status.ContainerStatuses[i]
}
for _, c := range p.Spec.Containers {
cs, ok := containerList[c.Name]
if !ok {
cs = &corev1.ContainerStatus{}
}
ki.gatherPodContainer(p, *cs, c, acc)
}
}
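// gatherPodContainer encodes the container state as state_code:
// running=0, terminated=1, waiting=2, unknown=3.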
func (ki *KubernetesInventory) gatherPodContainer(p *corev1.Pod, cs corev1.ContainerStatus, c corev1.Container, acc telegraf.Accumulator) {
stateCode := 3
stateReason := ""
state := "unknown"
readiness := "unready"
switch {
case cs.State.Running != nil:
stateCode = 0
state = "running"
case cs.State.Terminated != nil:
stateCode = 1
state = "terminated"
stateReason = cs.State.Terminated.Reason
case cs.State.Waiting != nil:
stateCode = 2
state = "waiting"
stateReason = cs.State.Waiting.Reason
}
if cs.Ready {
readiness = "ready"
}
fields := map[string]interface{}{
"restarts_total": cs.RestartCount,
"state_code": stateCode,
}
// deprecated in 1.15: use `state_reason` instead
if state == "terminated" {
fields["terminated_reason"] = stateReason
}
if stateReason != "" {
fields["state_reason"] = stateReason
}
phaseReason := p.Status.Reason
if phaseReason != "" {
fields["phase_reason"] = phaseReason
}
tags := map[string]string{
"container_name": c.Name,
"namespace": p.Namespace,
"node_name": p.Spec.NodeName,
"pod_name": p.Name,
"phase": string(p.Status.Phase),
"state": state,
"readiness": readiness,
}
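// An image reference of the form name:tag yields both an image and a
// version tag; otherwise only the image name is tagged.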
splitImage := strings.Split(c.Image, ":")
if len(splitImage) == 2 {
tags["version"] = splitImage[1]
}
tags["image"] = splitImage[0]
for key, val := range p.Spec.NodeSelector {
if ki.selectorFilter.Match(key) {
tags["node_selector_"+key] = val
}
}
req := c.Resources.Requests
lim := c.Resources.Limits
for resourceName, val := range req {
switch resourceName {
case "cpu":
fields["resource_requests_millicpu_units"] = ki.convertQuantity(val.String(), 1000)
case "memory":
fields["resource_requests_memory_bytes"] = ki.convertQuantity(val.String(), 1)
}
}
for resourceName, val := range lim {
switch resourceName {
case "cpu":
fields["resource_limits_millicpu_units"] = ki.convertQuantity(val.String(), 1000)
case "memory":
fields["resource_limits_memory_bytes"] = ki.convertQuantity(val.String(), 1)
}
}
for _, val := range p.Status.Conditions {
conditiontags := map[string]string{
"container_name": c.Name,
"image": splitImage[0],
"status": string(val.Status),
"namespace": p.Namespace,
"node_name": p.Spec.NodeName,
"pod_name": p.Name,
"condition": string(val.Type),
}
if len(splitImage) == 2 {
conditiontags["version"] = splitImage[1]
}
running := 0
podready := 0
if val.Status == "True" {
if val.Type == "Ready" {
podready = 1
}
running = 1
} else if val.Status == "Unknown" {
if val.Type == "Ready" {
podready = 0
}
running = 2
}
conditionfields := map[string]interface{}{
"status_condition": running,
"ready": podready,
}
acc.AddFields(podContainerMeasurement, conditionfields, conditiontags)
}
acc.AddFields(podContainerMeasurement, fields, tags)
}

View file

@@ -0,0 +1,998 @@
package kube_inventory
import (
"strings"
"testing"
"time"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
)
func TestPod(t *testing.T) {
cli := &client{}
now := time.Now()
started := time.Date(now.Year(), now.Month(), now.Day(), now.Hour()-1, 1, 36, 0, now.Location())
created := time.Date(now.Year(), now.Month(), now.Day(), now.Hour()-2, 1, 0, 0, now.Location())
cond1 := time.Date(now.Year(), 7, 5, 7, 53, 29, 0, now.Location())
cond2 := time.Date(now.Year(), 7, 5, 7, 53, 31, 0, now.Location())
tests := []struct {
name string
handler *mockHandler
output []telegraf.Metric
hasError bool
}{
{
name: "no pods",
handler: &mockHandler{
responseMap: map[string]interface{}{
"/pods/": &corev1.PodList{},
},
},
hasError: false,
},
{
name: "collect pods",
handler: &mockHandler{
responseMap: map[string]interface{}{
"/pods/": &corev1.PodList{
Items: []corev1.Pod{
{
Spec: corev1.PodSpec{
NodeName: "node1",
Containers: []corev1.Container{
{
Name: "running",
Image: "image1",
Ports: []corev1.ContainerPort{
{
ContainerPort: 8080,
Protocol: "TCP",
},
},
Resources: corev1.ResourceRequirements{
Limits: corev1.ResourceList{
"cpu": resource.MustParse("100m"),
},
Requests: corev1.ResourceList{
"cpu": resource.MustParse("100m"),
},
},
},
{
Name: "completed",
Image: "image1",
Ports: []corev1.ContainerPort{
{
ContainerPort: 8080,
Protocol: "TCP",
},
},
Resources: corev1.ResourceRequirements{
Limits: corev1.ResourceList{
"cpu": resource.MustParse("100m"),
},
Requests: corev1.ResourceList{
"cpu": resource.MustParse("100m"),
},
},
},
{
Name: "waiting",
Image: "image1",
Ports: []corev1.ContainerPort{
{
ContainerPort: 8080,
Protocol: "TCP",
},
},
Resources: corev1.ResourceRequirements{
Limits: corev1.ResourceList{
"cpu": resource.MustParse("100m"),
},
Requests: corev1.ResourceList{
"cpu": resource.MustParse("100m"),
},
},
},
},
Volumes: []corev1.Volume{
{
Name: "vol1",
VolumeSource: corev1.VolumeSource{
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: "pc1",
ReadOnly: true,
},
},
},
{
Name: "vol2",
},
},
NodeSelector: map[string]string{
"select1": "s1",
"select2": "s2",
},
},
Status: corev1.PodStatus{
Phase: "Running",
HostIP: "180.12.10.18",
PodIP: "10.244.2.15",
StartTime: &metav1.Time{Time: started},
Conditions: []corev1.PodCondition{
{
Type: "Initialized",
Status: "True",
LastTransitionTime: metav1.Time{Time: cond1},
},
{
Type: "Ready",
Status: "True",
LastTransitionTime: metav1.Time{Time: cond2},
},
{
Type: "Scheduled",
Status: "True",
LastTransitionTime: metav1.Time{Time: cond1},
},
},
ContainerStatuses: []corev1.ContainerStatus{
{
Name: "running",
State: corev1.ContainerState{
Running: &corev1.ContainerStateRunning{
StartedAt: metav1.Time{Time: started},
},
},
Ready: true,
RestartCount: 3,
Image: "image1",
ImageID: "image_id1",
ContainerID: "docker://54abe32d0094479d3d",
},
{
Name: "completed",
State: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
StartedAt: metav1.Time{Time: now},
ExitCode: 0,
Reason: "Completed",
},
},
Ready: false,
RestartCount: 3,
Image: "image1",
ImageID: "image_id1",
ContainerID: "docker://54abe32d0094479d3d",
},
{
Name: "waiting",
State: corev1.ContainerState{
Waiting: &corev1.ContainerStateWaiting{
Reason: "PodUninitialized",
},
},
Ready: false,
RestartCount: 3,
Image: "image1",
ImageID: "image_id1",
ContainerID: "docker://54abe32d0094479d3d",
},
},
},
ObjectMeta: metav1.ObjectMeta{
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Kind: "DaemonSet",
Name: "forwarder",
Controller: toPtr(true),
},
},
Generation: 11232,
Namespace: "ns1",
Name: "pod1",
Labels: map[string]string{
"lab1": "v1",
"lab2": "v2",
},
CreationTimestamp: metav1.Time{Time: created},
},
},
},
},
},
},
output: []telegraf.Metric{
testutil.MustMetric(
podContainerMeasurement,
map[string]string{
"pod_name": "pod1",
"condition": "Initialized",
"status": "True",
"image": "image1",
"node_name": "node1",
"namespace": "ns1",
"container_name": "running",
},
map[string]interface{}{
"status_condition": int64(1),
"ready": int64(0),
},
time.Unix(0, 0),
),
testutil.MustMetric(
podContainerMeasurement,
map[string]string{
"pod_name": "pod1",
"condition": "Ready",
"status": "True",
"image": "image1",
"node_name": "node1",
"namespace": "ns1",
"container_name": "running",
},
map[string]interface{}{
"status_condition": int64(1),
"ready": int64(1),
},
time.Unix(0, 0),
),
testutil.MustMetric(
podContainerMeasurement,
map[string]string{
"pod_name": "pod1",
"condition": "Scheduled",
"status": "True",
"image": "image1",
"node_name": "node1",
"namespace": "ns1",
"container_name": "running",
},
map[string]interface{}{
"status_condition": int64(1),
"ready": int64(0),
},
time.Unix(0, 0),
),
testutil.MustMetric(
podContainerMeasurement,
map[string]string{
"namespace": "ns1",
"container_name": "running",
"node_name": "node1",
"pod_name": "pod1",
"image": "image1",
"phase": "Running",
"state": "running",
"readiness": "ready",
"node_selector_select1": "s1",
"node_selector_select2": "s2",
},
map[string]interface{}{
"restarts_total": int32(3),
"state_code": 0,
"resource_requests_millicpu_units": int64(100),
"resource_limits_millicpu_units": int64(100),
},
time.Unix(0, 0),
),
testutil.MustMetric(
podContainerMeasurement,
map[string]string{
"pod_name": "pod1",
"condition": "Initialized",
"status": "True",
"image": "image1",
"node_name": "node1",
"namespace": "ns1",
"container_name": "completed",
},
map[string]interface{}{
"status_condition": int64(1),
"ready": int64(0),
},
time.Unix(0, 0),
),
testutil.MustMetric(
podContainerMeasurement,
map[string]string{
"pod_name": "pod1",
"condition": "Ready",
"status": "True",
"image": "image1",
"node_name": "node1",
"namespace": "ns1",
"container_name": "completed",
},
map[string]interface{}{
"status_condition": int64(1),
"ready": int64(1),
},
time.Unix(0, 0),
),
testutil.MustMetric(
podContainerMeasurement,
map[string]string{
"pod_name": "pod1",
"condition": "Scheduled",
"status": "True",
"image": "image1",
"node_name": "node1",
"namespace": "ns1",
"container_name": "completed",
},
map[string]interface{}{
"status_condition": int64(1),
"ready": int64(0),
},
time.Unix(0, 0),
),
testutil.MustMetric(
podContainerMeasurement,
map[string]string{
"namespace": "ns1",
"container_name": "completed",
"image": "image1",
"node_name": "node1",
"pod_name": "pod1",
"phase": "Running",
"state": "terminated",
"readiness": "unready",
"node_selector_select1": "s1",
"node_selector_select2": "s2",
},
map[string]interface{}{
"restarts_total": int32(3),
"state_code": 1,
"state_reason": "Completed",
"resource_requests_millicpu_units": int64(100),
"resource_limits_millicpu_units": int64(100),
"terminated_reason": "Completed",
},
time.Unix(0, 0),
),
testutil.MustMetric(
podContainerMeasurement,
map[string]string{
"pod_name": "pod1",
"condition": "Initialized",
"status": "True",
"image": "image1",
"node_name": "node1",
"namespace": "ns1",
"container_name": "waiting",
},
map[string]interface{}{
"status_condition": int64(1),
"ready": int64(0),
},
time.Unix(0, 0),
),
testutil.MustMetric(
podContainerMeasurement,
map[string]string{
"pod_name": "pod1",
"condition": "Ready",
"status": "True",
"image": "image1",
"node_name": "node1",
"namespace": "ns1",
"container_name": "waiting",
},
map[string]interface{}{
"status_condition": int64(1),
"ready": int64(1),
},
time.Unix(0, 0),
),
testutil.MustMetric(
podContainerMeasurement,
map[string]string{
"pod_name": "pod1",
"condition": "Scheduled",
"status": "True",
"image": "image1",
"node_name": "node1",
"namespace": "ns1",
"container_name": "waiting",
},
map[string]interface{}{
"status_condition": int64(1),
"ready": int64(0),
},
time.Unix(0, 0),
),
testutil.MustMetric(
podContainerMeasurement,
map[string]string{
"namespace": "ns1",
"container_name": "waiting",
"node_name": "node1",
"image": "image1",
"pod_name": "pod1",
"phase": "Running",
"state": "waiting",
"readiness": "unready",
"node_selector_select1": "s1",
"node_selector_select2": "s2",
},
map[string]interface{}{
"restarts_total": int32(3),
"state_code": 2,
"state_reason": "PodUninitialized",
"resource_requests_millicpu_units": int64(100),
"resource_limits_millicpu_units": int64(100),
},
time.Unix(0, 0),
),
},
hasError: false,
},
}
for _, v := range tests {
ks := &KubernetesInventory{
client: cli,
}
require.NoError(t, ks.createSelectorFilters())
acc := new(testutil.Accumulator)
items := ((v.handler.responseMap["/pods/"]).(*corev1.PodList)).Items
for i := range items {
ks.gatherPod(&items[i], acc)
}
err := acc.FirstError()
if v.hasError {
require.Errorf(t, err, "%s failed, should have error", v.name)
continue
}
// No error case
require.NoErrorf(t, err, "%s failed, err: %v", v.name, err)
require.Len(t, acc.Metrics, len(v.output))
testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime())
}
}
func TestPodSelectorFilter(t *testing.T) {
cli := &client{}
now := time.Now()
started := time.Date(now.Year(), now.Month(), now.Day(), now.Hour()-1, 1, 36, 0, now.Location())
created := time.Date(now.Year(), now.Month(), now.Day(), now.Hour()-2, 1, 36, 0, now.Location())
cond1 := time.Date(now.Year(), 7, 5, 7, 53, 29, 0, now.Location())
cond2 := time.Date(now.Year(), 7, 5, 7, 53, 31, 0, now.Location())
responseMap := map[string]interface{}{
"/pods/": &corev1.PodList{
Items: []corev1.Pod{
{
Spec: corev1.PodSpec{
NodeName: "node1",
Containers: []corev1.Container{
{
Name: "forwarder",
Image: "image1",
Ports: []corev1.ContainerPort{
{
ContainerPort: 8080,
Protocol: "TCP",
},
},
Resources: corev1.ResourceRequirements{
Limits: corev1.ResourceList{
"cpu": resource.MustParse("100m"),
},
Requests: corev1.ResourceList{
"cpu": resource.MustParse("100m"),
},
},
},
},
Volumes: []corev1.Volume{
{
Name: "vol1",
VolumeSource: corev1.VolumeSource{
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: "pc1",
ReadOnly: true,
},
},
},
{
Name: "vol2",
},
},
NodeSelector: map[string]string{
"select1": "s1",
"select2": "s2",
},
},
Status: corev1.PodStatus{
Phase: "Running",
HostIP: "180.12.10.18",
PodIP: "10.244.2.15",
StartTime: &metav1.Time{Time: started},
Conditions: []corev1.PodCondition{
{
Type: "Initialized",
Status: "True",
LastTransitionTime: metav1.Time{Time: cond1},
},
{
Type: "Ready",
Status: "True",
LastTransitionTime: metav1.Time{Time: cond2},
},
{
Type: "Scheduled",
Status: "True",
LastTransitionTime: metav1.Time{Time: cond1},
},
},
ContainerStatuses: []corev1.ContainerStatus{
{
Name: "forwarder",
State: corev1.ContainerState{
Running: &corev1.ContainerStateRunning{
StartedAt: metav1.Time{Time: now},
},
},
Ready: true,
RestartCount: 3,
Image: "image1",
ImageID: "image_id1",
ContainerID: "docker://54abe32d0094479d3d",
},
},
},
ObjectMeta: metav1.ObjectMeta{
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Kind: "DaemonSet",
Name: "forwarder",
Controller: toPtr(true),
},
},
Generation: 11232,
Namespace: "ns1",
Name: "pod1",
Labels: map[string]string{
"lab1": "v1",
"lab2": "v2",
},
CreationTimestamp: metav1.Time{Time: created},
},
},
},
},
}
tests := []struct {
name string
handler *mockHandler
hasError bool
include []string
exclude []string
expected map[string]string
}{
{
name: "nil filters equals all selectors",
handler: &mockHandler{
responseMap: responseMap,
},
hasError: false,
include: nil,
exclude: nil,
expected: map[string]string{
"node_selector_select1": "s1",
"node_selector_select2": "s2",
},
},
{
name: "empty filters equals all selectors",
handler: &mockHandler{
responseMap: responseMap,
},
hasError: false,
expected: map[string]string{
"node_selector_select1": "s1",
"node_selector_select2": "s2",
},
},
{
name: "include filter equals only include-matched selectors",
handler: &mockHandler{
responseMap: responseMap,
},
hasError: false,
include: []string{"select1"},
expected: map[string]string{
"node_selector_select1": "s1",
},
},
{
name: "exclude filter equals only non-excluded selectors (overrides include filter)",
handler: &mockHandler{
responseMap: responseMap,
},
hasError: false,
exclude: []string{"select2"},
expected: map[string]string{
"node_selector_select1": "s1",
},
},
{
name: "include glob filter equals only include-matched selectors",
handler: &mockHandler{
responseMap: responseMap,
},
hasError: false,
include: []string{"*1"},
expected: map[string]string{
"node_selector_select1": "s1",
},
},
{
name: "exclude glob filter equals only non-excluded selectors",
handler: &mockHandler{
responseMap: responseMap,
},
hasError: false,
exclude: []string{"*2"},
expected: map[string]string{
"node_selector_select1": "s1",
},
},
}
for _, v := range tests {
ks := &KubernetesInventory{
client: cli,
}
ks.SelectorInclude = v.include
ks.SelectorExclude = v.exclude
require.NoError(t, ks.createSelectorFilters())
acc := new(testutil.Accumulator)
items := ((v.handler.responseMap["/pods/"]).(*corev1.PodList)).Items
for i := range items {
ks.gatherPod(&items[i], acc)
}
// Grab selector tags
actual := map[string]string{}
for _, metric := range acc.Metrics {
for key, val := range metric.Tags {
if strings.Contains(key, "node_selector_") {
actual[key] = val
}
}
}
require.Equalf(t, v.expected, actual,
"actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected)
}
}
func TestPodPendingContainers(t *testing.T) {
cli := &client{}
now := time.Now()
started := time.Date(now.Year(), now.Month(), now.Day(), now.Hour()-1, 1, 36, 0, now.Location())
created := time.Date(now.Year(), now.Month(), now.Day(), now.Hour()-2, 1, 36, 0, now.Location())
cond1 := time.Date(now.Year(), 7, 5, 7, 53, 29, 0, now.Location())
cond2 := time.Date(now.Year(), 7, 5, 7, 53, 31, 0, now.Location())
tests := []struct {
name string
handler *mockHandler
output []telegraf.Metric
hasError bool
}{
{
name: "collect pods",
handler: &mockHandler{
responseMap: map[string]interface{}{
"/pods/": &corev1.PodList{
Items: []corev1.Pod{
{
Spec: corev1.PodSpec{
NodeName: "node1",
Containers: []corev1.Container{
{
Name: "waiting",
Image: "image1",
Ports: []corev1.ContainerPort{
{
ContainerPort: 8080,
Protocol: "TCP",
},
},
Resources: corev1.ResourceRequirements{
Limits: corev1.ResourceList{
"cpu": resource.MustParse("100m"),
},
Requests: corev1.ResourceList{
"cpu": resource.MustParse("100m"),
},
},
},
{
Name: "terminated",
Image: "image1",
Ports: []corev1.ContainerPort{
{
ContainerPort: 8080,
Protocol: "TCP",
},
},
Resources: corev1.ResourceRequirements{
Limits: corev1.ResourceList{
"cpu": resource.MustParse("100m"),
},
Requests: corev1.ResourceList{
"cpu": resource.MustParse("100m"),
},
},
},
},
Volumes: []corev1.Volume{
{
Name: "vol1",
VolumeSource: corev1.VolumeSource{
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: "pc1",
ReadOnly: true,
},
},
},
{
Name: "vol2",
},
},
NodeSelector: map[string]string{
"select1": "s1",
"select2": "s2",
},
},
Status: corev1.PodStatus{
Phase: "Pending",
Reason: "NetworkNotReady",
HostIP: "180.12.10.18",
PodIP: "10.244.2.15",
StartTime: &metav1.Time{Time: started},
Conditions: []corev1.PodCondition{
{
Type: "Initialized",
Status: "True",
LastTransitionTime: metav1.Time{Time: cond1},
},
{
Type: "Ready",
Status: "True",
LastTransitionTime: metav1.Time{Time: cond2},
},
{
Type: "Scheduled",
Status: "True",
LastTransitionTime: metav1.Time{Time: cond1},
},
},
},
ObjectMeta: metav1.ObjectMeta{
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Kind: "DaemonSet",
Name: "forwarder",
Controller: toPtr(true),
},
},
Generation: 11232,
Namespace: "ns1",
Name: "pod1",
Labels: map[string]string{
"lab1": "v1",
"lab2": "v2",
},
CreationTimestamp: metav1.Time{Time: created},
},
},
},
},
},
},
output: []telegraf.Metric{
testutil.MustMetric(
podContainerMeasurement,
map[string]string{
"pod_name": "pod1",
"condition": "Initialized",
"status": "True",
"image": "image1",
"node_name": "node1",
"namespace": "ns1",
"container_name": "waiting",
},
map[string]interface{}{
"status_condition": int64(1),
"ready": int64(0),
},
time.Unix(0, 0),
),
testutil.MustMetric(
podContainerMeasurement,
map[string]string{
"pod_name": "pod1",
"condition": "Ready",
"status": "True",
"image": "image1",
"node_name": "node1",
"namespace": "ns1",
"container_name": "waiting",
},
map[string]interface{}{
"status_condition": int64(1),
"ready": int64(1),
},
time.Unix(0, 0),
),
testutil.MustMetric(
podContainerMeasurement,
map[string]string{
"pod_name": "pod1",
"condition": "Scheduled",
"status": "True",
"image": "image1",
"node_name": "node1",
"namespace": "ns1",
"container_name": "waiting",
},
map[string]interface{}{
"status_condition": int64(1),
"ready": int64(0),
},
time.Unix(0, 0),
),
testutil.MustMetric(
podContainerMeasurement,
map[string]string{
"namespace": "ns1",
"container_name": "waiting",
"node_name": "node1",
"pod_name": "pod1",
"image": "image1",
"phase": "Pending",
"state": "unknown",
"readiness": "unready",
"node_selector_select1": "s1",
"node_selector_select2": "s2",
},
map[string]interface{}{
"phase_reason": "NetworkNotReady",
"restarts_total": int32(0),
"state_code": 3,
"resource_requests_millicpu_units": int64(100),
"resource_limits_millicpu_units": int64(100),
},
time.Unix(0, 0),
),
testutil.MustMetric(
podContainerMeasurement,
map[string]string{
"pod_name": "pod1",
"condition": "Initialized",
"status": "True",
"image": "image1",
"node_name": "node1",
"namespace": "ns1",
"container_name": "terminated",
},
map[string]interface{}{
"status_condition": int64(1),
"ready": int64(0),
},
time.Unix(0, 0),
),
testutil.MustMetric(
podContainerMeasurement,
map[string]string{
"pod_name": "pod1",
"condition": "Ready",
"status": "True",
"image": "image1",
"node_name": "node1",
"namespace": "ns1",
"container_name": "terminated",
},
map[string]interface{}{
"status_condition": int64(1),
"ready": int64(1),
},
time.Unix(0, 0),
),
testutil.MustMetric(
podContainerMeasurement,
map[string]string{
"pod_name": "pod1",
"condition": "Scheduled",
"status": "True",
"image": "image1",
"node_name": "node1",
"namespace": "ns1",
"container_name": "terminated",
},
map[string]interface{}{
"status_condition": int64(1),
"ready": int64(0),
},
time.Unix(0, 0),
),
testutil.MustMetric(
podContainerMeasurement,
map[string]string{
"namespace": "ns1",
"container_name": "terminated",
"node_name": "node1",
"pod_name": "pod1",
"image": "image1",
"phase": "Pending",
"state": "unknown",
"readiness": "unready",
"node_selector_select1": "s1",
"node_selector_select2": "s2",
},
map[string]interface{}{
"phase_reason": "NetworkNotReady",
"restarts_total": int32(0),
"state_code": 3,
"resource_requests_millicpu_units": int64(100),
"resource_limits_millicpu_units": int64(100),
},
time.Unix(0, 0),
),
},
hasError: false,
},
}
for _, v := range tests {
ks := &KubernetesInventory{
client: cli,
}
require.NoError(t, ks.createSelectorFilters())
acc := new(testutil.Accumulator)
items := ((v.handler.responseMap["/pods/"]).(*corev1.PodList)).Items
for i := range items {
ks.gatherPod(&items[i], acc)
}
err := acc.FirstError()
if v.hasError {
require.Errorf(t, err, "%s failed, should have error", v.name)
continue
}
// No error case
require.NoErrorf(t, err, "%s failed, err: %v", v.name, err)
require.Len(t, acc.Metrics, len(v.output))
testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime())
}
}

View file

@ -0,0 +1,77 @@
package kube_inventory
import (
"context"
"strings"
corev1 "k8s.io/api/core/v1"
"github.com/influxdata/telegraf"
)
func collectResourceQuotas(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) {
list, err := ki.client.getResourceQuotas(ctx)
if err != nil {
acc.AddError(err)
return
}
for _, i := range list.Items {
ki.gatherResourceQuota(i, acc)
}
}
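// gatherResourceQuota flattens the hard and used quota lists into fields,
// appending _limits or _requests to the key according to the resource name.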
func (ki *KubernetesInventory) gatherResourceQuota(r corev1.ResourceQuota, acc telegraf.Accumulator) {
fields := make(map[string]interface{}, len(r.Status.Hard)+len(r.Status.Used))
tags := map[string]string{
"resource": r.Name,
"namespace": r.Namespace,
}
for resourceName, val := range r.Status.Hard {
switch resourceName {
case "cpu", "limits.cpu", "requests.cpu":
key := "hard_cpu"
if strings.Contains(string(resourceName), "limits") {
key = key + "_limits"
} else if strings.Contains(string(resourceName), "requests") {
key = key + "_requests"
}
fields[key] = ki.convertQuantity(val.String(), 1)
case "memory", "limits.memory", "requests.memory":
key := "hard_memory"
if strings.Contains(string(resourceName), "limits") {
key = key + "_limits"
} else if strings.Contains(string(resourceName), "requests") {
key = key + "_requests"
}
fields[key] = ki.convertQuantity(val.String(), 1)
case "pods":
fields["hard_pods"] = atoi(val.String())
}
}
for resourceName, val := range r.Status.Used {
switch resourceName {
case "cpu", "requests.cpu", "limits.cpu":
key := "used_cpu"
if strings.Contains(string(resourceName), "limits") {
key = key + "_limits"
} else if strings.Contains(string(resourceName), "requests") {
key = key + "_requests"
}
fields[key] = ki.convertQuantity(val.String(), 1)
case "memory", "requests.memory", "limits.memory":
key := "used_memory"
if strings.Contains(string(resourceName), "limits") {
key = key + "_limits"
} else if strings.Contains(string(resourceName), "requests") {
key = key + "_requests"
}
fields[key] = ki.convertQuantity(val.String(), 1)
case "pods":
fields["used_pods"] = atoi(val.String())
}
}
acc.AddFields(resourcequotaMeasurement, fields, tags)
}

View file

@@ -0,0 +1,112 @@
package kube_inventory
import (
"testing"
"time"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
)
func TestResourceQuota(t *testing.T) {
cli := &client{}
now := time.Now()
tests := []struct {
name string
handler *mockHandler
output []telegraf.Metric
hasError bool
}{
{
name: "no resourcequota",
handler: &mockHandler{
responseMap: map[string]interface{}{
"/resourcequotas/": corev1.ResourceQuotaList{},
},
},
hasError: false,
},
{
name: "collect resourcequota",
handler: &mockHandler{
responseMap: map[string]interface{}{
"/resourcequotas/": corev1.ResourceQuotaList{
Items: []corev1.ResourceQuota{
{
Status: corev1.ResourceQuotaStatus{
Hard: corev1.ResourceList{
"cpu": resource.MustParse("16"),
"memory": resource.MustParse("125817904Ki"),
"pods": resource.MustParse("110"),
},
Used: corev1.ResourceList{
"cpu": resource.MustParse("10"),
"memory": resource.MustParse("125715504Ki"),
"pods": resource.MustParse("0"),
},
},
ObjectMeta: metav1.ObjectMeta{
Generation: 11232,
Namespace: "ns1",
Name: "rq1",
Labels: map[string]string{
"lab1": "v1",
"lab2": "v2",
},
CreationTimestamp: metav1.Time{Time: now},
},
},
},
},
},
},
output: []telegraf.Metric{
testutil.MustMetric(
resourcequotaMeasurement,
map[string]string{
"resource": "rq1",
"namespace": "ns1",
},
map[string]interface{}{
"hard_cpu": int64(16),
"hard_memory": int64(1.28837533696e+11),
"hard_pods": int64(110),
"used_cpu": int64(10),
"used_memory": int64(1.28732676096e+11),
"used_pods": int64(0),
},
time.Unix(0, 0),
),
},
hasError: false,
},
}
for _, v := range tests {
ks := &KubernetesInventory{
client: cli,
}
acc := new(testutil.Accumulator)
for _, quota := range ((v.handler.responseMap["/resourcequotas/"]).(corev1.ResourceQuotaList)).Items {
ks.gatherResourceQuota(quota, acc)
}
err := acc.FirstError()
if v.hasError {
require.Errorf(t, err, "%s failed, should have error", v.name)
continue
}
// No error case
require.NoErrorf(t, err, "%s failed, err: %v", v.name, err)
require.Len(t, acc.Metrics, len(v.output))
testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime())
}
}

View file

@@ -0,0 +1,53 @@
# Read metrics from the Kubernetes API
[[inputs.kube_inventory]]
## URL for the Kubernetes API.
## If empty, in-cluster config with the pod's service account token will be used.
# url = ""
## URL for the kubelet. If set, it will be used to collect pod resource metrics.
# url_kubelet = "http://127.0.0.1:10255"
## Namespace to use. Set to "" to use all namespaces.
# namespace = "default"
## Node name to filter to. No filtering by default.
# node_name = ""
## Use bearer token for authorization.
## Ignored if url is empty and in-cluster config is used.
# bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token"
## Set response_timeout (default 5 seconds)
# response_timeout = "5s"
## Optional Resources to exclude from gathering
## Leave this blank to try to gather everything available.
## Values can be: "daemonsets", "deployments", "endpoints", "ingress",
## "nodes", "persistentvolumes", "persistentvolumeclaims", "pods", "services",
## "statefulsets"
# resource_exclude = [ "deployments", "nodes", "statefulsets" ]
## Optional Resources to include when gathering
## Overrides resource_exclude if both set.
# resource_include = [ "deployments", "nodes", "statefulsets" ]
## Selectors to include and exclude as tags. Globs accepted.
## Note that an empty array for both will include all selectors as tags
## selector_exclude overrides selector_include if both set.
# selector_include = []
# selector_exclude = ["*"]
## Optional TLS Config
## Trusted root certificates for server
# tls_ca = "/path/to/cafile"
## Used for TLS client certificate authentication
# tls_cert = "/path/to/certfile"
## Used for TLS client certificate authentication
# tls_key = "/path/to/keyfile"
## Send the specified TLS server name via SNI
# tls_server_name = "kubernetes.example.com"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## Uncomment to remove deprecated metrics.
# fieldexclude = ["terminated_reason"]

View file

@@ -0,0 +1,71 @@
package kube_inventory
import (
"context"
corev1 "k8s.io/api/core/v1"
"github.com/influxdata/telegraf"
)
func collectServices(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) {
list, err := ki.client.getServices(ctx)
if err != nil {
acc.AddError(err)
return
}
for i := range list.Items {
ki.gatherService(&list.Items[i], acc)
}
}
func (ki *KubernetesInventory) gatherService(s *corev1.Service, acc telegraf.Accumulator) {
creationTS := s.GetCreationTimestamp()
if creationTS.IsZero() {
return
}
fields := map[string]interface{}{
"created": s.GetCreationTimestamp().UnixNano(),
"generation": s.Generation,
}
tags := map[string]string{
"service_name": s.Name,
"namespace": s.Namespace,
}
for key, val := range s.Spec.Selector {
if ki.selectorFilter.Match(key) {
tags["selector_"+key] = val
}
}
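// Emit one metric per service port; ExternalName services are tagged with
// the external name instead of a cluster IP.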
var getPorts = func() {
for _, port := range s.Spec.Ports {
fields["port"] = port.Port
fields["target_port"] = port.TargetPort.IntVal
tags["port_name"] = port.Name
tags["port_protocol"] = string(port.Protocol)
if s.Spec.Type == "ExternalName" {
tags["external_name"] = s.Spec.ExternalName
} else {
tags["cluster_ip"] = s.Spec.ClusterIP
}
acc.AddFields(serviceMeasurement, fields, tags)
}
}
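// When external IPs are present, each IP gets its own set of per-port
// metrics.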
if externIPs := s.Spec.ExternalIPs; externIPs != nil {
for _, ip := range externIPs {
tags["ip"] = ip
getPorts()
}
} else {
getPorts()
}
}

View file

@@ -0,0 +1,281 @@
package kube_inventory
import (
"strings"
"testing"
"time"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
)
func TestService(t *testing.T) {
cli := &client{}
now := time.Now()
now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location())
tests := []struct {
name string
handler *mockHandler
output []telegraf.Metric
hasError bool
include []string
exclude []string
}{
{
name: "no service",
handler: &mockHandler{
responseMap: map[string]interface{}{
"/service/": &corev1.ServiceList{},
},
},
hasError: false,
},
{
name: "collect service",
handler: &mockHandler{
responseMap: map[string]interface{}{
"/service/": &corev1.ServiceList{
Items: []corev1.Service{
{
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{
{
Port: 8080,
TargetPort: intstr.IntOrString{
IntVal: 1234,
},
Name: "diagnostic",
Protocol: "TCP",
},
},
ExternalIPs: []string{"1.0.0.127"},
ClusterIP: "127.0.0.1",
Selector: map[string]string{
"select1": "s1",
"select2": "s2",
},
},
ObjectMeta: metav1.ObjectMeta{
Generation: 12,
Namespace: "ns1",
Name: "checker",
CreationTimestamp: metav1.Time{Time: now},
},
},
},
},
},
},
output: []telegraf.Metric{
testutil.MustMetric(
"kubernetes_service",
map[string]string{
"service_name": "checker",
"namespace": "ns1",
"port_name": "diagnostic",
"port_protocol": "TCP",
"cluster_ip": "127.0.0.1",
"ip": "1.0.0.127",
"selector_select1": "s1",
"selector_select2": "s2",
},
map[string]interface{}{
"port": int32(8080),
"target_port": int32(1234),
"generation": int64(12),
"created": now.UnixNano(),
},
time.Unix(0, 0),
),
},
hasError: false,
},
}
for _, v := range tests {
ks := &KubernetesInventory{
client: cli,
}
ks.SelectorInclude = v.include
ks.SelectorExclude = v.exclude
require.NoError(t, ks.createSelectorFilters())
acc := new(testutil.Accumulator)
items := ((v.handler.responseMap["/service/"]).(*corev1.ServiceList)).Items
for i := range items {
ks.gatherService(&items[i], acc)
}
err := acc.FirstError()
if v.hasError {
require.Errorf(t, err, "%s failed, should have error", v.name)
continue
}
// No error case
require.NoErrorf(t, err, "%s failed, err: %v", v.name, err)
require.Len(t, acc.Metrics, len(v.output))
testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime())
}
}
func TestServiceSelectorFilter(t *testing.T) {
cli := &client{}
now := time.Now()
now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location())
responseMap := map[string]interface{}{
"/service/": &corev1.ServiceList{
Items: []corev1.Service{
{
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{
{
Port: 8080,
TargetPort: intstr.IntOrString{
IntVal: 1234,
},
Name: "diagnostic",
Protocol: "TCP",
},
},
ExternalIPs: []string{"1.0.0.127"},
ClusterIP: "127.0.0.1",
Selector: map[string]string{
"select1": "s1",
"select2": "s2",
},
},
ObjectMeta: metav1.ObjectMeta{
Generation: 12,
Namespace: "ns1",
Name: "checker",
CreationTimestamp: metav1.Time{Time: now},
},
},
},
},
}
tests := []struct {
name string
handler *mockHandler
hasError bool
include []string
exclude []string
expected map[string]string
}{
{
name: "nil filters equals all selectors",
handler: &mockHandler{
responseMap: responseMap,
},
hasError: false,
include: nil,
exclude: nil,
expected: map[string]string{
"selector_select1": "s1",
"selector_select2": "s2",
},
},
{
name: "empty filters equals all selectors",
handler: &mockHandler{
responseMap: responseMap,
},
hasError: false,
expected: map[string]string{
"selector_select1": "s1",
"selector_select2": "s2",
},
},
{
name: "include filter equals only include-matched selectors",
handler: &mockHandler{
responseMap: responseMap,
},
hasError: false,
include: []string{"select1"},
expected: map[string]string{
"selector_select1": "s1",
},
},
{
name: "exclude filter equals only non-excluded selectors (overrides include filter)",
handler: &mockHandler{
responseMap: responseMap,
},
hasError: false,
exclude: []string{"select2"},
expected: map[string]string{
"selector_select1": "s1",
},
},
{
name: "include glob filter equals only include-matched selectors",
handler: &mockHandler{
responseMap: responseMap,
},
hasError: false,
include: []string{"*1"},
expected: map[string]string{
"selector_select1": "s1",
},
},
{
name: "exclude glob filter equals only non-excluded selectors",
handler: &mockHandler{
responseMap: responseMap,
},
hasError: false,
exclude: []string{"*2"},
expected: map[string]string{
"selector_select1": "s1",
},
},
}
for _, v := range tests {
ks := &KubernetesInventory{
client: cli,
}
ks.SelectorInclude = v.include
ks.SelectorExclude = v.exclude
require.NoError(t, ks.createSelectorFilters())
acc := new(testutil.Accumulator)
items := ((v.handler.responseMap["/service/"]).(*corev1.ServiceList)).Items
for i := range items {
ks.gatherService(&items[i], acc)
}
// Grab selector tags
actual := map[string]string{}
for _, metric := range acc.Metrics {
for key, val := range metric.Tags {
if strings.Contains(key, "selector_") {
actual[key] = val
}
}
}
require.Equalf(t, v.expected, actual,
"actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected)
}
}

View file

@@ -0,0 +1,49 @@
package kube_inventory
import (
"context"
"k8s.io/api/apps/v1"
"github.com/influxdata/telegraf"
)
func collectStatefulSets(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) {
list, err := ki.client.getStatefulSets(ctx)
if err != nil {
acc.AddError(err)
return
}
for i := range list.Items {
ki.gatherStatefulSet(&list.Items[i], acc)
}
}
func (ki *KubernetesInventory) gatherStatefulSet(s *v1.StatefulSet, acc telegraf.Accumulator) {
status := s.Status
fields := map[string]interface{}{
"created": s.GetCreationTimestamp().UnixNano(),
"generation": s.Generation,
"replicas": status.Replicas,
"replicas_current": status.CurrentReplicas,
"replicas_ready": status.ReadyReplicas,
"replicas_updated": status.UpdatedReplicas,
"observed_generation": s.Status.ObservedGeneration,
}
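// Spec.Replicas is a pointer in the API types; spec_replicas is only
// reported when the desired replica count is actually set.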
if s.Spec.Replicas != nil {
fields["spec_replicas"] = *s.Spec.Replicas
}
tags := map[string]string{
"statefulset_name": s.Name,
"namespace": s.Namespace,
}
if s.Spec.Selector != nil {
for key, val := range s.Spec.Selector.MatchLabels {
if ki.selectorFilter.Match(key) {
tags["selector_"+key] = val
}
}
}
acc.AddFields(statefulSetMeasurement, fields, tags)
}

View file

@@ -0,0 +1,378 @@
package kube_inventory
import (
"strings"
"testing"
"time"
"github.com/stretchr/testify/require"
"k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
)
func TestStatefulSet(t *testing.T) {
cli := &client{}
now := time.Now()
now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location())
tests := []struct {
name string
handler *mockHandler
output []telegraf.Metric
hasError bool
}{
{
name: "no statefulsets",
handler: &mockHandler{
responseMap: map[string]interface{}{
"/statefulsets/": &v1.StatefulSetList{},
},
},
hasError: false,
},
{
name: "collect statefulsets",
handler: &mockHandler{
responseMap: map[string]interface{}{
"/statefulsets/": &v1.StatefulSetList{
Items: []v1.StatefulSet{
{
Status: v1.StatefulSetStatus{
Replicas: 2,
CurrentReplicas: 4,
ReadyReplicas: 1,
UpdatedReplicas: 3,
ObservedGeneration: 119,
},
Spec: v1.StatefulSetSpec{
Replicas: toPtr(int32(3)),
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"select1": "s1",
"select2": "s2",
},
},
},
ObjectMeta: metav1.ObjectMeta{
Generation: 332,
Namespace: "ns1",
Name: "sts1",
CreationTimestamp: metav1.Time{Time: now},
},
},
},
},
},
},
output: []telegraf.Metric{
testutil.MustMetric(
"kubernetes_statefulset",
map[string]string{
"namespace": "ns1",
"statefulset_name": "sts1",
"selector_select1": "s1",
"selector_select2": "s2",
},
map[string]interface{}{
"generation": int64(332),
"observed_generation": int64(119),
"created": now.UnixNano(),
"spec_replicas": int32(3),
"replicas": int32(2),
"replicas_current": int32(4),
"replicas_ready": int32(1),
"replicas_updated": int32(3),
},
time.Unix(0, 0),
),
},
hasError: false,
},
{
name: "no label selector",
handler: &mockHandler{
responseMap: map[string]interface{}{
"/statefulsets/": &v1.StatefulSetList{
Items: []v1.StatefulSet{
{
Status: v1.StatefulSetStatus{
Replicas: 2,
CurrentReplicas: 4,
ReadyReplicas: 1,
UpdatedReplicas: 3,
ObservedGeneration: 119,
},
Spec: v1.StatefulSetSpec{
Replicas: toPtr(int32(3)),
Selector: nil,
},
ObjectMeta: metav1.ObjectMeta{
Generation: 332,
Namespace: "ns1",
Name: "sts1",
CreationTimestamp: metav1.Time{Time: now},
},
},
},
},
},
},
output: []telegraf.Metric{
testutil.MustMetric(
"kubernetes_statefulset",
map[string]string{
"namespace": "ns1",
"statefulset_name": "sts1",
},
map[string]interface{}{
"generation": int64(332),
"observed_generation": int64(119),
"created": now.UnixNano(),
"spec_replicas": int32(3),
"replicas": int32(2),
"replicas_current": int32(4),
"replicas_ready": int32(1),
"replicas_updated": int32(3),
},
time.Unix(0, 0),
),
},
hasError: false,
},
{
name: "no desired number of replicas",
handler: &mockHandler{
responseMap: map[string]interface{}{
"/statefulsets/": &v1.StatefulSetList{
Items: []v1.StatefulSet{
{
Status: v1.StatefulSetStatus{
Replicas: 2,
CurrentReplicas: 4,
ReadyReplicas: 1,
UpdatedReplicas: 3,
ObservedGeneration: 119,
},
Spec: v1.StatefulSetSpec{
Replicas: nil,
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"select1": "s1",
"select2": "s2",
},
},
},
ObjectMeta: metav1.ObjectMeta{
Generation: 332,
Namespace: "ns1",
Name: "sts1",
CreationTimestamp: metav1.Time{Time: now},
},
},
},
},
},
},
output: []telegraf.Metric{
testutil.MustMetric(
"kubernetes_statefulset",
map[string]string{
"namespace": "ns1",
"statefulset_name": "sts1",
"selector_select1": "s1",
"selector_select2": "s2",
},
map[string]interface{}{
"generation": int64(332),
"observed_generation": int64(119),
"created": now.UnixNano(),
"replicas": int32(2),
"replicas_current": int32(4),
"replicas_ready": int32(1),
"replicas_updated": int32(3),
},
time.Unix(0, 0),
),
},
hasError: false,
},
}
for _, v := range tests {
ks := &KubernetesInventory{
client: cli,
}
require.NoError(t, ks.createSelectorFilters())
acc := &testutil.Accumulator{}
items := ((v.handler.responseMap["/statefulsets/"]).(*v1.StatefulSetList)).Items
for i := range items {
ks.gatherStatefulSet(&items[i], acc)
}
err := acc.FirstError()
if v.hasError {
require.Errorf(t, err, "%s failed, should have error", v.name)
continue
}
// No error case
require.NoErrorf(t, err, "%s failed, err: %v", v.name, err)
require.Len(t, acc.Metrics, len(v.output))
testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime())
}
}
func TestStatefulSetSelectorFilter(t *testing.T) {
cli := &client{}
now := time.Now()
now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location())
responseMap := map[string]interface{}{
"/statefulsets/": &v1.StatefulSetList{
Items: []v1.StatefulSet{
{
Status: v1.StatefulSetStatus{
Replicas: 2,
CurrentReplicas: 4,
ReadyReplicas: 1,
UpdatedReplicas: 3,
ObservedGeneration: 119,
},
Spec: v1.StatefulSetSpec{
Replicas: toPtr(int32(3)),
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"select1": "s1",
"select2": "s2",
},
},
},
ObjectMeta: metav1.ObjectMeta{
Generation: 332,
Namespace: "ns1",
Name: "sts1",
CreationTimestamp: metav1.Time{Time: now},
},
},
},
},
}
tests := []struct {
name string
handler *mockHandler
hasError bool
include []string
exclude []string
expected map[string]string
}{
{
name: "nil filters equals all selectors",
handler: &mockHandler{
responseMap: responseMap,
},
hasError: false,
include: nil,
exclude: nil,
expected: map[string]string{
"selector_select1": "s1",
"selector_select2": "s2",
},
},
{
name: "empty filters equals all selectors",
handler: &mockHandler{
responseMap: responseMap,
},
hasError: false,
expected: map[string]string{
"selector_select1": "s1",
"selector_select2": "s2",
},
},
{
name: "include filter equals only include-matched selectors",
handler: &mockHandler{
responseMap: responseMap,
},
hasError: false,
include: []string{"select1"},
expected: map[string]string{
"selector_select1": "s1",
},
},
{
name: "exclude filter equals only non-excluded selectors (overrides include filter)",
handler: &mockHandler{
responseMap: responseMap,
},
hasError: false,
exclude: []string{"select2"},
expected: map[string]string{
"selector_select1": "s1",
},
},
{
name: "include glob filter equals only include-matched selectors",
handler: &mockHandler{
responseMap: responseMap,
},
hasError: false,
include: []string{"*1"},
expected: map[string]string{
"selector_select1": "s1",
},
},
{
name: "exclude glob filter equals only non-excluded selectors",
handler: &mockHandler{
responseMap: responseMap,
},
hasError: false,
exclude: []string{"*2"},
expected: map[string]string{
"selector_select1": "s1",
},
},
}
for _, v := range tests {
ks := &KubernetesInventory{
client: cli,
}
ks.SelectorInclude = v.include
ks.SelectorExclude = v.exclude
require.NoError(t, ks.createSelectorFilters())
acc := new(testutil.Accumulator)
items := ((v.handler.responseMap["/statefulsets/"]).(*v1.StatefulSetList)).Items
for i := range items {
ks.gatherStatefulSet(&items[i], acc)
}
// Grab selector tags
actual := map[string]string{}
for _, metric := range acc.Metrics {
for key, val := range metric.Tags {
if strings.Contains(key, "selector_") {
actual[key] = val
}
}
}
require.Equalf(t, v.expected, actual,
"actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected)
}
}