Adding upstream version 1.34.4.
Signed-off-by: Daniel Baumann <daniel@debian.org>
parent e393c3af3f
commit 4978089aab
4963 changed files with 677545 additions and 0 deletions
plugins/inputs/jenkins/README.md (new file, 137 lines)
@@ -0,0 +1,137 @@
# Jenkins Input Plugin

This plugin gathers information about the nodes and jobs running in a
[Jenkins][jenkins] instance. The plugin uses the Jenkins API and does not
require a plugin on the server.

⭐ Telegraf v1.9.0
🏷️ applications
💻 all

[jenkins]: https://www.jenkins.io/

## Global configuration options <!-- @/docs/includes/plugin_config.md -->

In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used to
modify metrics, tags, and fields, or create aliases and configure ordering, etc.
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.

[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins

## Configuration

```toml @sample.conf
# Read jobs and cluster metrics from Jenkins instances
[[inputs.jenkins]]
  ## The Jenkins URL in the format "scheme://host:port"
  url = "http://my-jenkins-instance:8080"
  # username = "admin"
  # password = "admin"

  ## Set response_timeout
  response_timeout = "5s"

  ## Optional TLS Config
  # tls_ca = "/etc/telegraf/ca.pem"
  # tls_cert = "/etc/telegraf/cert.pem"
  # tls_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false

  ## Optional Max Job Build Age filter
  ## Defaults to 1 hour; builds older than max_build_age are ignored
  # max_build_age = "1h"

  ## Optional Sub Job Depth filter
  ## Jenkins can have an unlimited number of layers of sub jobs.
  ## This setting limits how many layers are pulled; the default value 0
  ## means unlimited pulling until there are no more sub jobs.
  # max_subjob_depth = 0

  ## Optional Sub Job Per Layer
  ## In workflow-multibranch-plugin, each branch is created as a sub job.
  ## This setting limits gathering to only the latest branches in each layer;
  ## empty will use the default value 10.
  # max_subjob_per_layer = 10

  ## Jobs to include or exclude from gathering
  ## When using both lists, job_exclude has priority.
  ## Wildcards are supported: [ "jobA/*", "jobB/subjob1/*" ]
  # job_include = [ "*" ]
  # job_exclude = [ ]

  ## Nodes to include or exclude from gathering
  ## When using both lists, node_exclude has priority.
  # node_include = [ "*" ]
  # node_exclude = [ ]

  ## Worker pool for the jenkins plugin only
  ## Leaving this field empty will use the default value 5
  # max_connections = 5

  ## When set to true, node labels are added as a comma-separated tag. If none
  ## are found, a tag with the value 'none' is used. If a label contains a
  ## comma, it is replaced with an underscore.
  # node_labels_as_tag = false
```

## Metrics

- jenkins
  - tags:
    - source
    - port
  - fields:
    - busy_executors
    - total_executors

- jenkins_node
  - tags:
    - arch
    - disk_path
    - temp_path
    - node_name
    - status ("online", "offline")
    - source
    - port
  - fields:
    - disk_available (Bytes)
    - temp_available (Bytes)
    - memory_available (Bytes)
    - memory_total (Bytes)
    - swap_available (Bytes)
    - swap_total (Bytes)
    - response_time (ms)
    - num_executors

- jenkins_job
  - tags:
    - name
    - parents
    - result
    - source
    - port
  - fields:
    - duration (ms)
    - number
    - result_code (0 = SUCCESS, 1 = FAILURE, 2 = NOT_BUILT, 3 = UNSTABLE, 4 = ABORTED)

## Sample Queries

```sql
SELECT mean("memory_available") AS "mean_memory_available", mean("memory_total") AS "mean_memory_total", mean("temp_available") AS "mean_temp_available" FROM "jenkins_node" WHERE time > now() - 15m GROUP BY time(:interval:) FILL(null)
```

```sql
SELECT mean("duration") AS "mean_duration" FROM "jenkins_job" WHERE time > now() - 24h GROUP BY time(:interval:) FILL(null)
```

## Example Output

```text
jenkins,host=myhost,port=80,source=my-jenkins-instance busy_executors=4i,total_executors=8i 1580418261000000000
jenkins_node,arch=Linux\ (amd64),disk_path=/var/jenkins_home,temp_path=/tmp,host=myhost,node_name=master,source=my-jenkins-instance,port=8080,status=online swap_total=4294963200,memory_available=586711040,memory_total=6089498624,response_time=1000i,disk_available=152392036352,temp_available=152392036352,swap_available=3503263744,num_executors=2i 1516031535000000000
jenkins_job,host=myhost,name=JOB1,parents=apps/br1,result=SUCCESS,source=my-jenkins-instance,port=8080 duration=2831i,result_code=0i 1516026630000000000
jenkins_job,host=myhost,name=JOB2,parents=apps/br2,result=SUCCESS,source=my-jenkins-instance,port=8080 duration=2285i,result_code=0i 1516027230000000000
```
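
The measurements above are read straight from Jenkins' own JSON API, so they can be cross-checked without Telegraf. Below is a minimal, stand-alone sketch (not part of the plugin) that queries the executor counts backing the `jenkins` measurement; it assumes a hypothetical instance at `http://localhost:8080` with anonymous read access, so adjust the URL and add basic auth to match your setup.

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// executorStats mirrors the two fields the plugin reads for the "jenkins"
// measurement from /computer/api/json.
type executorStats struct {
	BusyExecutors  int `json:"busyExecutors"`
	TotalExecutors int `json:"totalExecutors"`
}

func main() {
	// Hypothetical local instance; the plugin uses the configured `url` instead.
	resp, err := http.Get("http://localhost:8080/computer/api/json")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var stats executorStats
	if err := json.NewDecoder(resp.Body).Decode(&stats); err != nil {
		panic(err)
	}
	fmt.Printf("busy_executors=%d total_executors=%d\n", stats.BusyExecutors, stats.TotalExecutors)
}
```
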
plugins/inputs/jenkins/client.go (new file, 152 lines)
@@ -0,0 +1,152 @@
package jenkins
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
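// client wraps access to the Jenkins HTTP API; the semaphore limits the
// number of in-flight requests to max_connections.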
type client struct {
|
||||
baseURL string
|
||||
httpClient *http.Client
|
||||
username string
|
||||
password string
|
||||
sessionCookie *http.Cookie
|
||||
semaphore chan struct{}
|
||||
}
|
||||
|
||||
func newClient(httpClient *http.Client, url, username, password string, maxConnections int) *client {
|
||||
return &client{
|
||||
baseURL: url,
|
||||
httpClient: httpClient,
|
||||
username: username,
|
||||
password: password,
|
||||
semaphore: make(chan struct{}, maxConnections),
|
||||
}
|
||||
}
|
||||
|
||||
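// init primes the client: it fetches the base URL once to pick up a
// JSESSIONID session cookie (when present) and performs a first API call
// to verify the endpoint is reachable.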
func (c *client) init() error {
|
||||
// get session cookie
|
||||
req, err := http.NewRequest("GET", c.baseURL, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if c.username != "" || c.password != "" {
|
||||
req.SetBasicAuth(c.username, c.password)
|
||||
}
|
||||
resp, err := c.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
for _, cc := range resp.Cookies() {
|
||||
if strings.Contains(cc.Name, "JSESSIONID") {
|
||||
c.sessionCookie = cc
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// first api fetch
|
||||
return c.doGet(context.Background(), jobPath, new(jobResponse))
|
||||
}
|
||||
|
||||
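// doGet issues a GET against the Jenkins API while holding a slot in the
// connection semaphore, maps non-2xx (and 204) responses to apiError, and
// decodes the JSON body into v; a 401 additionally clears the cached
// session cookie.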
func (c *client) doGet(ctx context.Context, url string, v interface{}) error {
|
||||
req, err := createGetRequest(c.baseURL+url, c.username, c.password, c.sessionCookie)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
select {
|
||||
case c.semaphore <- struct{}{}:
|
||||
break
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
resp, err := c.httpClient.Do(req.WithContext(ctx))
|
||||
if err != nil {
|
||||
<-c.semaphore
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
resp.Body.Close()
|
||||
<-c.semaphore
|
||||
}()
|
||||
// Clear invalid token if unauthorized
|
||||
if resp.StatusCode == http.StatusUnauthorized {
|
||||
c.sessionCookie = nil
|
||||
return apiError{
|
||||
url: url,
|
||||
statusCode: resp.StatusCode,
|
||||
title: resp.Status,
|
||||
}
|
||||
}
|
||||
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
|
||||
return apiError{
|
||||
url: url,
|
||||
statusCode: resp.StatusCode,
|
||||
title: resp.Status,
|
||||
}
|
||||
}
|
||||
if resp.StatusCode == http.StatusNoContent {
|
||||
return apiError{
|
||||
url: url,
|
||||
statusCode: resp.StatusCode,
|
||||
title: resp.Status,
|
||||
}
|
||||
}
|
||||
|
||||
return json.NewDecoder(resp.Body).Decode(v)
|
||||
}
|
||||
|
||||
type apiError struct {
|
||||
url string
|
||||
statusCode int
|
||||
title string
|
||||
description string
|
||||
}
|
||||
|
||||
func (e apiError) Error() string {
|
||||
if e.description != "" {
|
||||
return fmt.Sprintf("[%s] %s: %s", e.url, e.title, e.description)
|
||||
}
|
||||
return fmt.Sprintf("[%s] %s", e.url, e.title)
|
||||
}
|
||||
|
||||
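// createGetRequest builds a JSON GET request, attaching basic auth and the
// cached session cookie when they are available.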
func createGetRequest(url, username, password string, sessionCookie *http.Cookie) (*http.Request, error) {
|
||||
req, err := http.NewRequest("GET", url, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if username != "" || password != "" {
|
||||
req.SetBasicAuth(username, password)
|
||||
}
|
||||
if sessionCookie != nil {
|
||||
req.AddCookie(sessionCookie)
|
||||
}
|
||||
req.Header.Add("Accept", "application/json")
|
||||
return req, nil
|
||||
}
|
||||
|
||||
func (c *client) getJobs(ctx context.Context, jr *jobRequest) (js *jobResponse, err error) {
|
||||
js = new(jobResponse)
|
||||
url := jobPath
|
||||
if jr != nil {
|
||||
url = jr.url()
|
||||
}
|
||||
err = c.doGet(ctx, url, js)
|
||||
return js, err
|
||||
}
|
||||
|
||||
func (c *client) getBuild(ctx context.Context, jr jobRequest, number int64) (b *buildResponse, err error) {
|
||||
b = new(buildResponse)
|
||||
url := jr.buildURL(number)
|
||||
err = c.doGet(ctx, url, b)
|
||||
return b, err
|
||||
}
|
||||
|
||||
func (c *client) getAllNodes(ctx context.Context) (nodeResp *nodeResponse, err error) {
|
||||
nodeResp = new(nodeResponse)
|
||||
err = c.doGet(ctx, nodePath, nodeResp)
|
||||
return nodeResp, err
|
||||
}
|
plugins/inputs/jenkins/jenkins.go (new file, 468 lines)
@@ -0,0 +1,468 @@
//go:generate ../../../tools/readme_config_includer/generator
|
||||
package jenkins
|
||||
|
||||
import (
|
||||
"context"
|
||||
_ "embed"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/config"
|
||||
"github.com/influxdata/telegraf/filter"
|
||||
"github.com/influxdata/telegraf/plugins/common/tls"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
//go:embed sample.conf
|
||||
var sampleConfig string
|
||||
|
||||
const (
|
||||
measurementJenkins = "jenkins"
|
||||
measurementNode = "jenkins_node"
|
||||
measurementJob = "jenkins_job"
|
||||
)
|
||||
|
||||
type Jenkins struct {
|
||||
URL string `toml:"url"`
|
||||
Username string `toml:"username"`
|
||||
Password string `toml:"password"`
|
||||
// HTTP Timeout specified as a string - 3s, 1m, 1h
|
||||
ResponseTimeout config.Duration `toml:"response_timeout"`
|
||||
source string
|
||||
port string
|
||||
|
||||
MaxConnections int `toml:"max_connections"`
|
||||
MaxBuildAge config.Duration `toml:"max_build_age"`
|
||||
MaxSubJobDepth int `toml:"max_subjob_depth"`
|
||||
MaxSubJobPerLayer int `toml:"max_subjob_per_layer"`
|
||||
NodeLabelsAsTag bool `toml:"node_labels_as_tag"`
|
||||
JobExclude []string `toml:"job_exclude"`
|
||||
JobInclude []string `toml:"job_include"`
|
||||
jobFilter filter.Filter
|
||||
|
||||
NodeExclude []string `toml:"node_exclude"`
|
||||
NodeInclude []string `toml:"node_include"`
|
||||
nodeFilter filter.Filter
|
||||
|
||||
tls.ClientConfig
|
||||
client *client
|
||||
|
||||
Log telegraf.Logger `toml:"-"`
|
||||
|
||||
semaphore chan struct{}
|
||||
}
|
||||
|
||||
func (*Jenkins) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (j *Jenkins) Gather(acc telegraf.Accumulator) error {
|
||||
if j.client == nil {
|
||||
client, err := j.newHTTPClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := j.initialize(client); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
j.gatherNodesData(acc)
|
||||
j.gatherJobs(acc)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
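// newHTTPClient builds the shared HTTP client from the configured TLS
// settings, response timeout and connection limit.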
func (j *Jenkins) newHTTPClient() (*http.Client, error) {
|
||||
tlsCfg, err := j.ClientConfig.TLSConfig()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parse jenkins config %q: %w", j.URL, err)
|
||||
}
|
||||
return &http.Client{
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: tlsCfg,
|
||||
MaxIdleConns: j.MaxConnections,
|
||||
},
|
||||
Timeout: time.Duration(j.ResponseTimeout),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// the HTTP client is injected as a dependency so tests can substitute an httptest client
|
||||
func (j *Jenkins) initialize(client *http.Client) error {
|
||||
var err error
|
||||
|
||||
// init jenkins tags
|
||||
u, err := url.Parse(j.URL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if u.Port() == "" {
|
||||
if u.Scheme == "http" {
|
||||
j.port = "80"
|
||||
} else if u.Scheme == "https" {
|
||||
j.port = "443"
|
||||
}
|
||||
} else {
|
||||
j.port = u.Port()
|
||||
}
|
||||
j.source = u.Hostname()
|
||||
|
||||
// init filters
|
||||
j.jobFilter, err = filter.NewIncludeExcludeFilter(j.JobInclude, j.JobExclude)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error compiling job filters %q: %w", j.URL, err)
|
||||
}
|
||||
j.nodeFilter, err = filter.NewIncludeExcludeFilter(j.NodeInclude, j.NodeExclude)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error compiling node filters %q: %w", j.URL, err)
|
||||
}
|
||||
|
||||
// init tcp pool with default value
|
||||
if j.MaxConnections <= 0 {
|
||||
j.MaxConnections = 5
|
||||
}
|
||||
|
||||
// default sub jobs can be acquired
|
||||
if j.MaxSubJobPerLayer <= 0 {
|
||||
j.MaxSubJobPerLayer = 10
|
||||
}
|
||||
|
||||
j.semaphore = make(chan struct{}, j.MaxConnections)
|
||||
|
||||
j.client = newClient(client, j.URL, j.Username, j.Password, j.MaxConnections)
|
||||
|
||||
return j.client.init()
|
||||
}
|
||||
|
||||
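// gatherNodeData turns a single computer entry into one jenkins_node metric,
// honouring the node include/exclude filter and the node_labels_as_tag option.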
func (j *Jenkins) gatherNodeData(n node, acc telegraf.Accumulator) error {
|
||||
if n.DisplayName == "" {
|
||||
return errors.New("error empty node name")
|
||||
}
|
||||
|
||||
tags := map[string]string{"node_name": n.DisplayName}
|
||||
|
||||
// filter out excluded or not included node_name
|
||||
if !j.nodeFilter.Match(tags["node_name"]) {
|
||||
return nil
|
||||
}
|
||||
|
||||
monitorData := n.MonitorData
|
||||
|
||||
if monitorData.HudsonNodeMonitorsArchitectureMonitor != "" {
|
||||
tags["arch"] = monitorData.HudsonNodeMonitorsArchitectureMonitor
|
||||
}
|
||||
|
||||
tags["status"] = "online"
|
||||
if n.Offline {
|
||||
tags["status"] = "offline"
|
||||
}
|
||||
|
||||
tags["source"] = j.source
|
||||
tags["port"] = j.port
|
||||
|
||||
fields := make(map[string]interface{})
|
||||
fields["num_executors"] = n.NumExecutors
|
||||
|
||||
if j.NodeLabelsAsTag {
|
||||
labels := make([]string, 0, len(n.AssignedLabels))
|
||||
for _, label := range n.AssignedLabels {
|
||||
labels = append(labels, strings.ReplaceAll(label.Name, ",", "_"))
|
||||
}
|
||||
|
||||
if len(labels) == 0 {
|
||||
tags["labels"] = "none"
|
||||
} else {
|
||||
sort.Strings(labels)
|
||||
tags["labels"] = strings.Join(labels, ",")
|
||||
}
|
||||
}
|
||||
|
||||
if monitorData.HudsonNodeMonitorsResponseTimeMonitor != nil {
|
||||
fields["response_time"] = monitorData.HudsonNodeMonitorsResponseTimeMonitor.Average
|
||||
}
|
||||
if monitorData.HudsonNodeMonitorsDiskSpaceMonitor != nil {
|
||||
tags["disk_path"] = monitorData.HudsonNodeMonitorsDiskSpaceMonitor.Path
|
||||
fields["disk_available"] = monitorData.HudsonNodeMonitorsDiskSpaceMonitor.Size
|
||||
}
|
||||
if monitorData.HudsonNodeMonitorsTemporarySpaceMonitor != nil {
|
||||
tags["temp_path"] = monitorData.HudsonNodeMonitorsTemporarySpaceMonitor.Path
|
||||
fields["temp_available"] = monitorData.HudsonNodeMonitorsTemporarySpaceMonitor.Size
|
||||
}
|
||||
if monitorData.HudsonNodeMonitorsSwapSpaceMonitor != nil {
|
||||
fields["swap_available"] = monitorData.HudsonNodeMonitorsSwapSpaceMonitor.SwapAvailable
|
||||
fields["memory_available"] = monitorData.HudsonNodeMonitorsSwapSpaceMonitor.MemoryAvailable
|
||||
fields["swap_total"] = monitorData.HudsonNodeMonitorsSwapSpaceMonitor.SwapTotal
|
||||
fields["memory_total"] = monitorData.HudsonNodeMonitorsSwapSpaceMonitor.MemoryTotal
|
||||
}
|
||||
acc.AddFields(measurementNode, fields, tags)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
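// gatherNodesData emits the cluster-wide executor counts as the jenkins
// measurement and then one jenkins_node metric per computer.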
func (j *Jenkins) gatherNodesData(acc telegraf.Accumulator) {
|
||||
nodeResp, err := j.client.getAllNodes(context.Background())
|
||||
if err != nil {
|
||||
acc.AddError(err)
|
||||
return
|
||||
}
|
||||
|
||||
// get total and busy executors
|
||||
tags := map[string]string{"source": j.source, "port": j.port}
|
||||
fields := make(map[string]interface{})
|
||||
fields["busy_executors"] = nodeResp.BusyExecutors
|
||||
fields["total_executors"] = nodeResp.TotalExecutors
|
||||
|
||||
acc.AddFields(measurementJenkins, fields, tags)
|
||||
|
||||
// get node data
|
||||
for _, node := range nodeResp.Computers {
|
||||
err = j.gatherNodeData(node, acc)
|
||||
if err == nil {
|
||||
continue
|
||||
}
|
||||
acc.AddError(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (j *Jenkins) gatherJobs(acc telegraf.Accumulator) {
|
||||
js, err := j.client.getJobs(context.Background(), nil)
|
||||
if err != nil {
|
||||
acc.AddError(err)
|
||||
return
|
||||
}
|
||||
var wg sync.WaitGroup
|
||||
for _, job := range js.Jobs {
|
||||
wg.Add(1)
|
||||
go func(name string, wg *sync.WaitGroup, acc telegraf.Accumulator) {
|
||||
defer wg.Done()
|
||||
if err := j.getJobDetail(jobRequest{
|
||||
name: name,
|
||||
layer: 0,
|
||||
}, acc); err != nil {
|
||||
acc.AddError(err)
|
||||
}
|
||||
}(job.Name, &wg, acc)
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
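// getJobDetail recursively walks sub jobs (bounded by max_subjob_depth and
// max_subjob_per_layer), applies the job include/exclude filter and gathers
// the last completed build if it is newer than max_build_age.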
func (j *Jenkins) getJobDetail(jr jobRequest, acc telegraf.Accumulator) error {
|
||||
if j.MaxSubJobDepth > 0 && jr.layer == j.MaxSubJobDepth {
|
||||
return nil
|
||||
}
|
||||
|
||||
js, err := j.client.getJobs(context.Background(), &jr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for k, ij := range js.Jobs {
|
||||
if k < len(js.Jobs)-j.MaxSubJobPerLayer-1 {
|
||||
continue
|
||||
}
|
||||
wg.Add(1)
|
||||
// schedule tcp fetch for inner jobs
|
||||
go func(ij innerJob, jr jobRequest, acc telegraf.Accumulator) {
|
||||
defer wg.Done()
|
||||
if err := j.getJobDetail(jobRequest{
|
||||
name: ij.Name,
|
||||
parents: jr.combined(),
|
||||
layer: jr.layer + 1,
|
||||
}, acc); err != nil {
|
||||
acc.AddError(err)
|
||||
}
|
||||
}(ij, jr, acc)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
// filter out excluded or not included jobs
|
||||
if !j.jobFilter.Match(jr.hierarchyName()) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// collect build info
|
||||
number := js.LastBuild.Number
|
||||
if number < 1 {
|
||||
// no build info
|
||||
return nil
|
||||
}
|
||||
build, err := j.client.getBuild(context.Background(), jr, number)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if build.Building {
|
||||
j.Log.Debugf("Ignore running build on %s, build %v", jr.name, number)
|
||||
return nil
|
||||
}
|
||||
|
||||
// stop if build is too old
|
||||
// the cutoff is derived from MaxBuildAge (defaults to one hour)
|
||||
cutoff := time.Now().Add(-1 * time.Duration(j.MaxBuildAge))
|
||||
|
||||
// skip builds that completed before the cutoff
|
||||
if build.getTimestamp().Before(cutoff) {
|
||||
return nil
|
||||
}
|
||||
|
||||
j.gatherJobBuild(jr, build, acc)
|
||||
return nil
|
||||
}
|
||||
|
||||
type nodeResponse struct {
|
||||
Computers []node `json:"computer"`
|
||||
BusyExecutors int `json:"busyExecutors"`
|
||||
TotalExecutors int `json:"totalExecutors"`
|
||||
}
|
||||
|
||||
type node struct {
|
||||
DisplayName string `json:"displayName"`
|
||||
Offline bool `json:"offline"`
|
||||
NumExecutors int `json:"numExecutors"`
|
||||
MonitorData monitorData `json:"monitorData"`
|
||||
AssignedLabels []label `json:"assignedLabels"`
|
||||
}
|
||||
|
||||
type label struct {
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
type monitorData struct {
|
||||
HudsonNodeMonitorsArchitectureMonitor string `json:"hudson.node_monitors.ArchitectureMonitor"`
|
||||
HudsonNodeMonitorsDiskSpaceMonitor *nodeSpaceMonitor `json:"hudson.node_monitors.DiskSpaceMonitor"`
|
||||
HudsonNodeMonitorsResponseTimeMonitor *responseTimeMonitor `json:"hudson.node_monitors.ResponseTimeMonitor"`
|
||||
HudsonNodeMonitorsSwapSpaceMonitor *swapSpaceMonitor `json:"hudson.node_monitors.SwapSpaceMonitor"`
|
||||
HudsonNodeMonitorsTemporarySpaceMonitor *nodeSpaceMonitor `json:"hudson.node_monitors.TemporarySpaceMonitor"`
|
||||
}
|
||||
|
||||
type nodeSpaceMonitor struct {
|
||||
Path string `json:"path"`
|
||||
Size float64 `json:"size"`
|
||||
}
|
||||
|
||||
type responseTimeMonitor struct {
|
||||
Average int64 `json:"average"`
|
||||
}
|
||||
|
||||
type swapSpaceMonitor struct {
|
||||
SwapAvailable float64 `json:"availableSwapSpace"`
|
||||
SwapTotal float64 `json:"totalSwapSpace"`
|
||||
MemoryAvailable float64 `json:"availablePhysicalMemory"`
|
||||
MemoryTotal float64 `json:"totalPhysicalMemory"`
|
||||
}
|
||||
|
||||
type jobResponse struct {
|
||||
LastBuild jobBuild `json:"lastBuild"`
|
||||
Jobs []innerJob `json:"jobs"`
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
type innerJob struct {
|
||||
Name string `json:"name"`
|
||||
URL string `json:"url"`
|
||||
Color string `json:"color"`
|
||||
}
|
||||
|
||||
type jobBuild struct {
|
||||
Number int64
|
||||
URL string
|
||||
}
|
||||
|
||||
type buildResponse struct {
|
||||
Building bool `json:"building"`
|
||||
Duration int64 `json:"duration"`
|
||||
Number int64 `json:"number"`
|
||||
Result string `json:"result"`
|
||||
Timestamp int64 `json:"timestamp"`
|
||||
}
|
||||
|
||||
func (b *buildResponse) getTimestamp() time.Time {
|
||||
return time.Unix(0, b.Timestamp*int64(time.Millisecond))
|
||||
}
|
||||
|
||||
const (
|
||||
nodePath = "/computer/api/json"
|
||||
jobPath = "/api/json"
|
||||
)
|
||||
|
||||
type jobRequest struct {
|
||||
name string
|
||||
parents []string
|
||||
layer int
|
||||
}
|
||||
|
||||
func (jr jobRequest) combined() []string {
|
||||
path := make([]string, 0, len(jr.parents)+1)
|
||||
path = append(path, jr.parents...)
|
||||
return append(path, jr.name)
|
||||
}
|
||||
|
||||
func (jr jobRequest) combinedEscaped() []string {
|
||||
jobs := jr.combined()
|
||||
for index, job := range jobs {
|
||||
jobs[index] = url.PathEscape(job)
|
||||
}
|
||||
return jobs
|
||||
}
|
||||
|
||||
func (jr jobRequest) url() string {
|
||||
return "/job/" + strings.Join(jr.combinedEscaped(), "/job/") + jobPath
|
||||
}
|
||||
|
||||
func (jr jobRequest) buildURL(number int64) string {
|
||||
return "/job/" + strings.Join(jr.combinedEscaped(), "/job/") + "/" + strconv.Itoa(int(number)) + jobPath
|
||||
}
|
||||
|
||||
func (jr jobRequest) hierarchyName() string {
|
||||
return strings.Join(jr.combined(), "/")
|
||||
}
|
||||
|
||||
func (jr jobRequest) parentsString() string {
|
||||
return strings.Join(jr.parents, "/")
|
||||
}
|
||||
|
||||
func (j *Jenkins) gatherJobBuild(jr jobRequest, b *buildResponse, acc telegraf.Accumulator) {
|
||||
tags := map[string]string{"name": jr.name, "parents": jr.parentsString(), "result": b.Result, "source": j.source, "port": j.port}
|
||||
fields := make(map[string]interface{})
|
||||
fields["duration"] = b.Duration
|
||||
fields["result_code"] = mapResultCode(b.Result)
|
||||
fields["number"] = b.Number
|
||||
|
||||
acc.AddFields(measurementJob, fields, tags, b.getTimestamp())
|
||||
}
|
||||
|
||||
// mapResultCode maps the textual build result to the numeric result_code field; unknown results map to -1
|
||||
func mapResultCode(s string) int {
|
||||
switch strings.ToLower(s) {
|
||||
case "success":
|
||||
return 0
|
||||
case "failure":
|
||||
return 1
|
||||
case "not_built":
|
||||
return 2
|
||||
case "unstable":
|
||||
return 3
|
||||
case "aborted":
|
||||
return 4
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("jenkins", func() telegraf.Input {
|
||||
return &Jenkins{
|
||||
MaxBuildAge: config.Duration(time.Hour),
|
||||
MaxConnections: 5,
|
||||
MaxSubJobPerLayer: 10,
|
||||
}
|
||||
})
|
||||
}
|
plugins/inputs/jenkins/jenkins_test.go (new file, 998 lines)
@@ -0,0 +1,998 @@
// Test Suite
|
||||
package jenkins
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"sort"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/config"
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
)
|
||||
|
||||
func TestJobRequest(t *testing.T) {
|
||||
tests := []struct {
|
||||
input jobRequest
|
||||
hierarchyName string
|
||||
URL string
|
||||
}{
|
||||
{
|
||||
jobRequest{},
|
||||
"",
|
||||
"",
|
||||
},
|
||||
{
|
||||
jobRequest{
|
||||
name: "1",
|
||||
parents: []string{"3", "2"},
|
||||
},
|
||||
"3/2/1",
|
||||
"/job/3/job/2/job/1/api/json",
|
||||
},
|
||||
{
|
||||
jobRequest{
|
||||
name: "job 3",
|
||||
parents: []string{"job 1", "job 2"},
|
||||
},
|
||||
"job 1/job 2/job 3",
|
||||
"/job/job%201/job/job%202/job/job%203/api/json",
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
hierarchyName := test.input.hierarchyName()
|
||||
address := test.input.url()
|
||||
if hierarchyName != test.hierarchyName {
|
||||
t.Errorf("Expected %s, got %s\n", test.hierarchyName, hierarchyName)
|
||||
}
|
||||
|
||||
if test.URL != "" && address != test.URL {
|
||||
t.Errorf("Expected %s, got %s\n", test.URL, address)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestResultCode(t *testing.T) {
|
||||
tests := []struct {
|
||||
input string
|
||||
output int
|
||||
}{
|
||||
{"SUCCESS", 0},
|
||||
{"Failure", 1},
|
||||
{"NOT_BUILT", 2},
|
||||
{"UNSTABLE", 3},
|
||||
{"ABORTED", 4},
|
||||
}
|
||||
for _, test := range tests {
|
||||
output := mapResultCode(test.input)
|
||||
if output != test.output {
|
||||
t.Errorf("Expected %d, got %d\n", test.output, output)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type mockHandler struct {
|
||||
// responseMap is the path to response interface
|
||||
// we will output the serialized response in json when serving http
|
||||
// example '/computer/api/json': *gojenkins.
|
||||
responseMap map[string]interface{}
|
||||
}
|
||||
|
||||
func (h mockHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
o, ok := h.responseMap[r.URL.RequestURI()]
|
||||
if !ok {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
b, err := json.Marshal(o)
|
||||
if err != nil {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if len(b) == 0 {
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
return
|
||||
}
|
||||
|
||||
w.Write(b) //nolint:errcheck // ignore the returned error as the tests will fail anyway
|
||||
}
|
||||
|
||||
func TestGatherNodeData(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input mockHandler
|
||||
output *testutil.Accumulator
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "bad node data",
|
||||
input: mockHandler{
|
||||
responseMap: map[string]interface{}{
|
||||
"/api/json": struct{}{},
|
||||
"/computer/api/json": nodeResponse{
|
||||
Computers: []node{
|
||||
{},
|
||||
{},
|
||||
{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
output: &testutil.Accumulator{
|
||||
Metrics: []*testutil.Metric{
|
||||
{
|
||||
Tags: map[string]string{
|
||||
"source": "127.0.0.1",
|
||||
},
|
||||
Fields: map[string]interface{}{
|
||||
"busy_executors": 0,
|
||||
"total_executors": 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "empty monitor data",
|
||||
input: mockHandler{
|
||||
responseMap: map[string]interface{}{
|
||||
"/api/json": struct{}{},
|
||||
"/computer/api/json": nodeResponse{
|
||||
Computers: []node{
|
||||
{DisplayName: "master"},
|
||||
{DisplayName: "node1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
output: &testutil.Accumulator{},
|
||||
},
|
||||
{
|
||||
name: "filtered nodes (excluded)",
|
||||
input: mockHandler{
|
||||
responseMap: map[string]interface{}{
|
||||
"/api/json": struct{}{},
|
||||
"/computer/api/json": nodeResponse{
|
||||
BusyExecutors: 4,
|
||||
TotalExecutors: 8,
|
||||
Computers: []node{
|
||||
{DisplayName: "ignore-1"},
|
||||
{DisplayName: "ignore-2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
output: &testutil.Accumulator{
|
||||
Metrics: []*testutil.Metric{
|
||||
{
|
||||
Tags: map[string]string{
|
||||
"source": "127.0.0.1",
|
||||
},
|
||||
Fields: map[string]interface{}{
|
||||
"busy_executors": 4,
|
||||
"total_executors": 8,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "filtered nodes (included)",
|
||||
input: mockHandler{
|
||||
responseMap: map[string]interface{}{
|
||||
"/api/json": struct{}{},
|
||||
"/computer/api/json": nodeResponse{
|
||||
BusyExecutors: 4,
|
||||
TotalExecutors: 8,
|
||||
Computers: []node{
|
||||
{DisplayName: "filtered-1"},
|
||||
{DisplayName: "filtered-1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
output: &testutil.Accumulator{
|
||||
Metrics: []*testutil.Metric{
|
||||
{
|
||||
Tags: map[string]string{
|
||||
"source": "127.0.0.1",
|
||||
},
|
||||
Fields: map[string]interface{}{
|
||||
"busy_executors": 4,
|
||||
"total_executors": 8,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "normal data collection",
|
||||
input: mockHandler{
|
||||
responseMap: map[string]interface{}{
|
||||
"/api/json": struct{}{},
|
||||
"/computer/api/json": nodeResponse{
|
||||
BusyExecutors: 4,
|
||||
TotalExecutors: 8,
|
||||
Computers: []node{
|
||||
{
|
||||
DisplayName: "master",
|
||||
MonitorData: monitorData{
|
||||
HudsonNodeMonitorsArchitectureMonitor: "linux",
|
||||
HudsonNodeMonitorsResponseTimeMonitor: &responseTimeMonitor{
|
||||
Average: 10032,
|
||||
},
|
||||
HudsonNodeMonitorsDiskSpaceMonitor: &nodeSpaceMonitor{
|
||||
Path: "/path/1",
|
||||
Size: 123,
|
||||
},
|
||||
HudsonNodeMonitorsTemporarySpaceMonitor: &nodeSpaceMonitor{
|
||||
Path: "/path/2",
|
||||
Size: 245,
|
||||
},
|
||||
HudsonNodeMonitorsSwapSpaceMonitor: &swapSpaceMonitor{
|
||||
SwapAvailable: 212,
|
||||
SwapTotal: 500,
|
||||
MemoryAvailable: 101,
|
||||
MemoryTotal: 500,
|
||||
},
|
||||
},
|
||||
Offline: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
output: &testutil.Accumulator{
|
||||
Metrics: []*testutil.Metric{
|
||||
{
|
||||
Tags: map[string]string{
|
||||
"source": "127.0.0.1",
|
||||
},
|
||||
Fields: map[string]interface{}{
|
||||
"busy_executors": 4,
|
||||
"total_executors": 8,
|
||||
},
|
||||
},
|
||||
{
|
||||
Tags: map[string]string{
|
||||
"node_name": "master",
|
||||
"arch": "linux",
|
||||
"status": "online",
|
||||
"disk_path": "/path/1",
|
||||
"temp_path": "/path/2",
|
||||
"source": "127.0.0.1",
|
||||
},
|
||||
Fields: map[string]interface{}{
|
||||
"response_time": int64(10032),
|
||||
"disk_available": float64(123),
|
||||
"temp_available": float64(245),
|
||||
"swap_available": float64(212),
|
||||
"swap_total": float64(500),
|
||||
"memory_available": float64(101),
|
||||
"memory_total": float64(500),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "slave is offline",
|
||||
input: mockHandler{
|
||||
responseMap: map[string]interface{}{
|
||||
"/api/json": struct{}{},
|
||||
"/computer/api/json": nodeResponse{
|
||||
BusyExecutors: 4,
|
||||
TotalExecutors: 8,
|
||||
Computers: []node{
|
||||
{
|
||||
DisplayName: "slave",
|
||||
MonitorData: monitorData{},
|
||||
NumExecutors: 1,
|
||||
Offline: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
output: &testutil.Accumulator{
|
||||
Metrics: []*testutil.Metric{
|
||||
{
|
||||
Tags: map[string]string{
|
||||
"source": "127.0.0.1",
|
||||
},
|
||||
Fields: map[string]interface{}{
|
||||
"busy_executors": 4,
|
||||
"total_executors": 8,
|
||||
},
|
||||
},
|
||||
{
|
||||
Tags: map[string]string{
|
||||
"node_name": "slave",
|
||||
"status": "offline",
|
||||
},
|
||||
Fields: map[string]interface{}{
|
||||
"num_executors": 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
ts := httptest.NewServer(test.input)
|
||||
defer ts.Close()
|
||||
j := &Jenkins{
|
||||
Log: testutil.Logger{},
|
||||
URL: ts.URL,
|
||||
ResponseTimeout: config.Duration(time.Microsecond),
|
||||
NodeExclude: []string{"ignore-1", "ignore-2"},
|
||||
NodeInclude: []string{"master", "slave"},
|
||||
}
|
||||
te := j.initialize(&http.Client{Transport: &http.Transport{}})
|
||||
acc := new(testutil.Accumulator)
|
||||
j.gatherNodesData(acc)
|
||||
if err := acc.FirstError(); err != nil {
|
||||
te = err
|
||||
}
|
||||
|
||||
if !test.wantErr && te != nil {
|
||||
t.Fatalf("%s: failed %s, expected to be nil", test.name, te.Error())
|
||||
} else if test.wantErr && te == nil {
|
||||
t.Fatalf("%s: expected err, got nil", test.name)
|
||||
}
|
||||
if test.output == nil && len(acc.Metrics) > 0 {
|
||||
t.Fatalf("%s: collected extra data %s", test.name, acc.Metrics)
|
||||
} else if test.output != nil && len(test.output.Metrics) > 0 {
|
||||
for i := 0; i < len(test.output.Metrics); i++ {
|
||||
for k, m := range test.output.Metrics[i].Tags {
|
||||
if acc.Metrics[i].Tags[k] != m {
|
||||
t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s\n", test.name, k, m, acc.Metrics[i].Tags[k])
|
||||
}
|
||||
}
|
||||
for k, m := range test.output.Metrics[i].Fields {
|
||||
if acc.Metrics[i].Fields[k] != m {
|
||||
t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n",
|
||||
test.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestLabels(t *testing.T) {
|
||||
input := mockHandler{
|
||||
responseMap: map[string]interface{}{
|
||||
"/api/json": struct{}{},
|
||||
"/computer/api/json": nodeResponse{
|
||||
BusyExecutors: 4,
|
||||
TotalExecutors: 8,
|
||||
Computers: []node{
|
||||
{
|
||||
DisplayName: "master",
|
||||
AssignedLabels: []label{
|
||||
{"project_a"},
|
||||
{"testing"},
|
||||
},
|
||||
MonitorData: monitorData{
|
||||
HudsonNodeMonitorsResponseTimeMonitor: &responseTimeMonitor{
|
||||
Average: 54321,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
DisplayName: "secondary",
|
||||
MonitorData: monitorData{
|
||||
HudsonNodeMonitorsResponseTimeMonitor: &responseTimeMonitor{
|
||||
Average: 12345,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
expected := []telegraf.Metric{
|
||||
testutil.MustMetric("jenkins",
|
||||
map[string]string{
|
||||
"source": "127.0.0.1",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"busy_executors": 4,
|
||||
"total_executors": 8,
|
||||
},
|
||||
time.Unix(0, 0),
|
||||
),
|
||||
testutil.MustMetric("jenkins_node",
|
||||
map[string]string{
|
||||
"node_name": "master",
|
||||
"status": "online",
|
||||
"source": "127.0.0.1",
|
||||
"labels": "project_a,testing",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"num_executors": int64(0),
|
||||
"response_time": int64(54321),
|
||||
},
|
||||
time.Unix(0, 0),
|
||||
),
|
||||
testutil.MustMetric("jenkins_node",
|
||||
map[string]string{
|
||||
"node_name": "secondary",
|
||||
"status": "online",
|
||||
"source": "127.0.0.1",
|
||||
"labels": "none",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"num_executors": int64(0),
|
||||
"response_time": int64(12345),
|
||||
},
|
||||
time.Unix(0, 0),
|
||||
),
|
||||
}
|
||||
|
||||
ts := httptest.NewServer(input)
|
||||
defer ts.Close()
|
||||
j := &Jenkins{
|
||||
Log: testutil.Logger{},
|
||||
URL: ts.URL,
|
||||
ResponseTimeout: config.Duration(time.Microsecond),
|
||||
NodeLabelsAsTag: true,
|
||||
}
|
||||
require.NoError(t, j.initialize(&http.Client{Transport: &http.Transport{}}))
|
||||
acc := new(testutil.Accumulator)
|
||||
j.gatherNodesData(acc)
|
||||
require.NoError(t, acc.FirstError())
|
||||
results := acc.GetTelegrafMetrics()
|
||||
for _, metric := range results {
|
||||
metric.RemoveTag("port")
|
||||
}
|
||||
testutil.RequireMetricsEqual(t, expected, results, testutil.IgnoreTime())
|
||||
}
|
||||
|
||||
func TestInitialize(t *testing.T) {
|
||||
mh := mockHandler{
|
||||
responseMap: map[string]interface{}{
|
||||
"/api/json": struct{}{},
|
||||
},
|
||||
}
|
||||
ts := httptest.NewServer(mh)
|
||||
defer ts.Close()
|
||||
mockClient := &http.Client{Transport: &http.Transport{}}
|
||||
tests := []struct {
|
||||
// name of the test
|
||||
name string
|
||||
input *Jenkins
|
||||
output *Jenkins
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "bad jenkins config",
|
||||
input: &Jenkins{
|
||||
Log: testutil.Logger{},
|
||||
URL: "http://a bad url",
|
||||
ResponseTimeout: config.Duration(time.Microsecond),
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "has filter",
|
||||
input: &Jenkins{
|
||||
Log: testutil.Logger{},
|
||||
URL: ts.URL,
|
||||
ResponseTimeout: config.Duration(time.Microsecond),
|
||||
JobInclude: []string{"jobA", "jobB"},
|
||||
JobExclude: []string{"job1", "job2"},
|
||||
NodeExclude: []string{"node1", "node2"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "default config",
|
||||
input: &Jenkins{
|
||||
Log: testutil.Logger{},
|
||||
URL: ts.URL,
|
||||
ResponseTimeout: config.Duration(time.Microsecond),
|
||||
},
|
||||
output: &Jenkins{
|
||||
Log: testutil.Logger{},
|
||||
MaxConnections: 5,
|
||||
MaxSubJobPerLayer: 10,
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
te := test.input.initialize(mockClient)
|
||||
if !test.wantErr && te != nil {
|
||||
t.Fatalf("%s: failed %s, expected to be nil", test.name, te.Error())
|
||||
} else if test.wantErr && te == nil {
|
||||
t.Fatalf("%s: expected err, got nil", test.name)
|
||||
}
|
||||
if test.output != nil {
|
||||
if test.input.client == nil {
|
||||
t.Fatalf("%s: failed %v, jenkins instance shouldn't be nil", test.name, te)
|
||||
}
|
||||
if test.input.MaxConnections != test.output.MaxConnections {
|
||||
t.Fatalf("%s: different MaxConnections Expected %d, got %d\n", test.name, test.output.MaxConnections, test.input.MaxConnections)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGatherJobs(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input mockHandler
|
||||
output *testutil.Accumulator
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "empty job",
|
||||
input: mockHandler{
|
||||
responseMap: map[string]interface{}{
|
||||
"/api/json": &jobResponse{},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "bad inner jobs",
|
||||
input: mockHandler{
|
||||
responseMap: map[string]interface{}{
|
||||
"/api/json": &jobResponse{
|
||||
Jobs: []innerJob{
|
||||
{Name: "job1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "jobs has no build",
|
||||
input: mockHandler{
|
||||
responseMap: map[string]interface{}{
|
||||
"/api/json": &jobResponse{
|
||||
Jobs: []innerJob{
|
||||
{Name: "job1"},
|
||||
},
|
||||
},
|
||||
"/job/job1/api/json": &jobResponse{},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "bad build info",
|
||||
input: mockHandler{
|
||||
responseMap: map[string]interface{}{
|
||||
"/api/json": &jobResponse{
|
||||
Jobs: []innerJob{
|
||||
{Name: "job1"},
|
||||
},
|
||||
},
|
||||
"/job/job1/api/json": &jobResponse{
|
||||
LastBuild: jobBuild{
|
||||
Number: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "ignore building job",
|
||||
input: mockHandler{
|
||||
responseMap: map[string]interface{}{
|
||||
"/api/json": &jobResponse{
|
||||
Jobs: []innerJob{
|
||||
{Name: "job1"},
|
||||
},
|
||||
},
|
||||
"/job/job1/api/json": &jobResponse{
|
||||
LastBuild: jobBuild{
|
||||
Number: 1,
|
||||
},
|
||||
},
|
||||
"/job/job1/1/api/json": &buildResponse{
|
||||
Building: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ignore old build",
|
||||
input: mockHandler{
|
||||
responseMap: map[string]interface{}{
|
||||
"/api/json": &jobResponse{
|
||||
Jobs: []innerJob{
|
||||
{Name: "job1"},
|
||||
},
|
||||
},
|
||||
"/job/job1/api/json": &jobResponse{
|
||||
LastBuild: jobBuild{
|
||||
Number: 2,
|
||||
},
|
||||
},
|
||||
"/job/job1/2/api/json": &buildResponse{
|
||||
Building: false,
|
||||
Timestamp: 100,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "gather metrics",
|
||||
input: mockHandler{
|
||||
responseMap: map[string]interface{}{
|
||||
"/api/json": &jobResponse{
|
||||
Jobs: []innerJob{
|
||||
{Name: "job1"},
|
||||
{Name: "job2"},
|
||||
},
|
||||
},
|
||||
"/job/job1/api/json": &jobResponse{
|
||||
LastBuild: jobBuild{
|
||||
Number: 3,
|
||||
},
|
||||
},
|
||||
"/job/job2/api/json": &jobResponse{
|
||||
LastBuild: jobBuild{
|
||||
Number: 1,
|
||||
},
|
||||
},
|
||||
"/job/job1/3/api/json": &buildResponse{
|
||||
Building: false,
|
||||
Result: "SUCCESS",
|
||||
Duration: 25558,
|
||||
Number: 3,
|
||||
Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000,
|
||||
},
|
||||
"/job/job2/1/api/json": &buildResponse{
|
||||
Building: false,
|
||||
Result: "FAILURE",
|
||||
Duration: 1558,
|
||||
Number: 1,
|
||||
Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000,
|
||||
},
|
||||
},
|
||||
},
|
||||
output: &testutil.Accumulator{
|
||||
Metrics: []*testutil.Metric{
|
||||
{
|
||||
Tags: map[string]string{
|
||||
"name": "job1",
|
||||
"result": "SUCCESS",
|
||||
},
|
||||
Fields: map[string]interface{}{
|
||||
"duration": int64(25558),
|
||||
"number": int64(3),
|
||||
"result_code": 0,
|
||||
},
|
||||
},
|
||||
{
|
||||
Tags: map[string]string{
|
||||
"name": "job2",
|
||||
"result": "FAILURE",
|
||||
},
|
||||
Fields: map[string]interface{}{
|
||||
"duration": int64(1558),
|
||||
"number": int64(1),
|
||||
"result_code": 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "gather metrics for jobs with space",
|
||||
input: mockHandler{
|
||||
responseMap: map[string]interface{}{
|
||||
"/api/json": &jobResponse{
|
||||
Jobs: []innerJob{
|
||||
{Name: "job 1"},
|
||||
},
|
||||
},
|
||||
"/job/job%201/api/json": &jobResponse{
|
||||
LastBuild: jobBuild{
|
||||
Number: 3,
|
||||
},
|
||||
},
|
||||
"/job/job%201/3/api/json": &buildResponse{
|
||||
Building: false,
|
||||
Result: "SUCCESS",
|
||||
Duration: 25558,
|
||||
Number: 3,
|
||||
Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000,
|
||||
},
|
||||
},
|
||||
},
|
||||
output: &testutil.Accumulator{
|
||||
Metrics: []*testutil.Metric{
|
||||
{
|
||||
Tags: map[string]string{
|
||||
"name": "job 1",
|
||||
"result": "SUCCESS",
|
||||
},
|
||||
Fields: map[string]interface{}{
|
||||
"duration": int64(25558),
|
||||
"number": int64(3),
|
||||
"result_code": 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "gather metrics for nested jobs with space exercising append slice behaviour",
|
||||
input: mockHandler{
|
||||
responseMap: map[string]interface{}{
|
||||
"/api/json": &jobResponse{
|
||||
Jobs: []innerJob{
|
||||
{Name: "l1"},
|
||||
},
|
||||
},
|
||||
"/job/l1/api/json": &jobResponse{
|
||||
Jobs: []innerJob{
|
||||
{Name: "l2"},
|
||||
},
|
||||
},
|
||||
"/job/l1/job/l2/api/json": &jobResponse{
|
||||
Jobs: []innerJob{
|
||||
{Name: "job 1"},
|
||||
},
|
||||
},
|
||||
"/job/l1/job/l2/job/job%201/api/json": &jobResponse{
|
||||
Jobs: []innerJob{
|
||||
{Name: "job 2"},
|
||||
},
|
||||
},
|
||||
"/job/l1/job/l2/job/job%201/job/job%202/api/json": &jobResponse{
|
||||
LastBuild: jobBuild{
|
||||
Number: 3,
|
||||
},
|
||||
},
|
||||
"/job/l1/job/l2/job/job%201/job/job%202/3/api/json": &buildResponse{
|
||||
Building: false,
|
||||
Result: "SUCCESS",
|
||||
Duration: 25558,
|
||||
Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000,
|
||||
},
|
||||
},
|
||||
},
|
||||
output: &testutil.Accumulator{
|
||||
Metrics: []*testutil.Metric{
|
||||
{
|
||||
Tags: map[string]string{
|
||||
"name": "job 2",
|
||||
"parents": "l1/l2/job 1",
|
||||
"result": "SUCCESS",
|
||||
},
|
||||
Fields: map[string]interface{}{
|
||||
"duration": int64(25558),
|
||||
"result_code": 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "gather sub jobs, jobs filter",
|
||||
input: mockHandler{
|
||||
responseMap: map[string]interface{}{
|
||||
"/api/json": &jobResponse{
|
||||
Jobs: []innerJob{
|
||||
{Name: "apps"},
|
||||
{Name: "ignore-1"},
|
||||
},
|
||||
},
|
||||
"/job/ignore-1/api/json": &jobResponse{},
|
||||
"/job/apps/api/json": &jobResponse{
|
||||
Jobs: []innerJob{
|
||||
{Name: "k8s-cloud"},
|
||||
{Name: "chronograf"},
|
||||
{Name: "ignore-all"},
|
||||
},
|
||||
},
|
||||
"/job/apps/job/ignore-all/api/json": &jobResponse{
|
||||
Jobs: []innerJob{
|
||||
{Name: "1"},
|
||||
{Name: "2"},
|
||||
},
|
||||
},
|
||||
"/job/apps/job/ignore-all/job/1/api/json": &jobResponse{
|
||||
LastBuild: jobBuild{
|
||||
Number: 1,
|
||||
},
|
||||
},
|
||||
"/job/apps/job/ignore-all/job/2/api/json": &jobResponse{
|
||||
LastBuild: jobBuild{
|
||||
Number: 1,
|
||||
},
|
||||
},
|
||||
"/job/apps/job/chronograf/api/json": &jobResponse{
|
||||
LastBuild: jobBuild{
|
||||
Number: 1,
|
||||
},
|
||||
},
|
||||
"/job/apps/job/k8s-cloud/api/json": &jobResponse{
|
||||
Jobs: []innerJob{
|
||||
{Name: "PR-100"},
|
||||
{Name: "PR-101"},
|
||||
{Name: "PR-ignore2"},
|
||||
{Name: "PR 1"},
|
||||
{Name: "PR ignore"},
|
||||
},
|
||||
},
|
||||
"/job/apps/job/k8s-cloud/job/PR%20ignore/api/json": &jobResponse{
|
||||
LastBuild: jobBuild{
|
||||
Number: 1,
|
||||
},
|
||||
},
|
||||
"/job/apps/job/k8s-cloud/job/PR-ignore2/api/json": &jobResponse{
|
||||
LastBuild: jobBuild{
|
||||
Number: 1,
|
||||
},
|
||||
},
|
||||
"/job/apps/job/k8s-cloud/job/PR-100/api/json": &jobResponse{
|
||||
LastBuild: jobBuild{
|
||||
Number: 1,
|
||||
},
|
||||
},
|
||||
"/job/apps/job/k8s-cloud/job/PR-101/api/json": &jobResponse{
|
||||
LastBuild: jobBuild{
|
||||
Number: 4,
|
||||
},
|
||||
},
|
||||
"/job/apps/job/k8s-cloud/job/PR%201/api/json": &jobResponse{
|
||||
LastBuild: jobBuild{
|
||||
Number: 1,
|
||||
},
|
||||
},
|
||||
"/job/apps/job/chronograf/1/api/json": &buildResponse{
|
||||
Building: false,
|
||||
Result: "FAILURE",
|
||||
Duration: 1558,
|
||||
Number: 1,
|
||||
Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000,
|
||||
},
|
||||
"/job/apps/job/k8s-cloud/job/PR-101/4/api/json": &buildResponse{
|
||||
Building: false,
|
||||
Result: "SUCCESS",
|
||||
Duration: 76558,
|
||||
Number: 4,
|
||||
Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000,
|
||||
},
|
||||
"/job/apps/job/k8s-cloud/job/PR-100/1/api/json": &buildResponse{
|
||||
Building: false,
|
||||
Result: "SUCCESS",
|
||||
Duration: 91558,
|
||||
Number: 1,
|
||||
Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000,
|
||||
},
|
||||
"/job/apps/job/k8s-cloud/job/PR%201/1/api/json": &buildResponse{
|
||||
Building: false,
|
||||
Result: "SUCCESS",
|
||||
Duration: 87832,
|
||||
Number: 1,
|
||||
Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000,
|
||||
},
|
||||
},
|
||||
},
|
||||
output: &testutil.Accumulator{
|
||||
Metrics: []*testutil.Metric{
|
||||
{
|
||||
Tags: map[string]string{
|
||||
"name": "PR 1",
|
||||
"parents": "apps/k8s-cloud",
|
||||
"result": "SUCCESS",
|
||||
},
|
||||
Fields: map[string]interface{}{
|
||||
"duration": int64(87832),
|
||||
"number": int64(1),
|
||||
"result_code": 0,
|
||||
},
|
||||
},
|
||||
{
|
||||
Tags: map[string]string{
|
||||
"name": "PR-100",
|
||||
"parents": "apps/k8s-cloud",
|
||||
"result": "SUCCESS",
|
||||
},
|
||||
Fields: map[string]interface{}{
|
||||
"duration": int64(91558),
|
||||
"number": int64(1),
|
||||
"result_code": 0,
|
||||
},
|
||||
},
|
||||
{
|
||||
Tags: map[string]string{
|
||||
"name": "PR-101",
|
||||
"parents": "apps/k8s-cloud",
|
||||
"result": "SUCCESS",
|
||||
},
|
||||
Fields: map[string]interface{}{
|
||||
"duration": int64(76558),
|
||||
"number": int64(4),
|
||||
"result_code": 0,
|
||||
},
|
||||
},
|
||||
{
|
||||
Tags: map[string]string{
|
||||
"name": "chronograf",
|
||||
"parents": "apps",
|
||||
"result": "FAILURE",
|
||||
},
|
||||
Fields: map[string]interface{}{
|
||||
"duration": int64(1558),
|
||||
"number": int64(1),
|
||||
"result_code": 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
ts := httptest.NewServer(test.input)
|
||||
defer ts.Close()
|
||||
j := &Jenkins{
|
||||
Log: testutil.Logger{},
|
||||
URL: ts.URL,
|
||||
MaxBuildAge: config.Duration(time.Hour),
|
||||
ResponseTimeout: config.Duration(time.Microsecond),
|
||||
JobInclude: []string{
|
||||
"*",
|
||||
},
|
||||
JobExclude: []string{
|
||||
"ignore-1",
|
||||
"apps/ignore-all/*",
|
||||
"apps/k8s-cloud/PR-ignore2",
|
||||
"apps/k8s-cloud/PR ignore",
|
||||
},
|
||||
}
|
||||
te := j.initialize(&http.Client{Transport: &http.Transport{}})
|
||||
acc := new(testutil.Accumulator)
|
||||
j.gatherJobs(acc)
|
||||
if err := acc.FirstError(); err != nil {
|
||||
te = err
|
||||
}
|
||||
if !test.wantErr && te != nil {
|
||||
t.Fatalf("%s: failed %s, expected to be nil", test.name, te.Error())
|
||||
} else if test.wantErr && te == nil {
|
||||
t.Fatalf("%s: expected err, got nil", test.name)
|
||||
}
|
||||
|
||||
if test.output != nil && len(test.output.Metrics) > 0 {
|
||||
// sort metrics
|
||||
sort.Slice(acc.Metrics, func(i, j int) bool {
|
||||
return strings.Compare(acc.Metrics[i].Tags["name"], acc.Metrics[j].Tags["name"]) < 0
|
||||
})
|
||||
for i := range test.output.Metrics {
|
||||
for k, m := range test.output.Metrics[i].Tags {
|
||||
if acc.Metrics[i].Tags[k] != m {
|
||||
t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s\n", test.name, k, m, acc.Metrics[i].Tags[k])
|
||||
}
|
||||
}
|
||||
for k, m := range test.output.Metrics[i].Fields {
|
||||
if acc.Metrics[i].Fields[k] != m {
|
||||
t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n",
|
||||
test.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
plugins/inputs/jenkins/sample.conf (new file, 52 lines)
@@ -0,0 +1,52 @@
# Read jobs and cluster metrics from Jenkins instances
[[inputs.jenkins]]
  ## The Jenkins URL in the format "scheme://host:port"
  url = "http://my-jenkins-instance:8080"
  # username = "admin"
  # password = "admin"

  ## Set response_timeout
  response_timeout = "5s"

  ## Optional TLS Config
  # tls_ca = "/etc/telegraf/ca.pem"
  # tls_cert = "/etc/telegraf/cert.pem"
  # tls_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false

  ## Optional Max Job Build Age filter
  ## Defaults to 1 hour; builds older than max_build_age are ignored
  # max_build_age = "1h"

  ## Optional Sub Job Depth filter
  ## Jenkins can have an unlimited number of layers of sub jobs.
  ## This setting limits how many layers are pulled; the default value 0
  ## means unlimited pulling until there are no more sub jobs.
  # max_subjob_depth = 0

  ## Optional Sub Job Per Layer
  ## In workflow-multibranch-plugin, each branch is created as a sub job.
  ## This setting limits gathering to only the latest branches in each layer;
  ## empty will use the default value 10.
  # max_subjob_per_layer = 10

  ## Jobs to include or exclude from gathering
  ## When using both lists, job_exclude has priority.
  ## Wildcards are supported: [ "jobA/*", "jobB/subjob1/*" ]
  # job_include = [ "*" ]
  # job_exclude = [ ]

  ## Nodes to include or exclude from gathering
  ## When using both lists, node_exclude has priority.
  # node_include = [ "*" ]
  # node_exclude = [ ]

  ## Worker pool for the jenkins plugin only
  ## Leaving this field empty will use the default value 5
  # max_connections = 5

  ## When set to true, node labels are added as a comma-separated tag. If none
  ## are found, a tag with the value 'none' is used. If a label contains a
  ## comma, it is replaced with an underscore.
  # node_labels_as_tag = false