1
0
Fork 0

Adding upstream version 1.34.4.

Signed-off-by: Daniel Baumann <daniel@debian.org>
This commit is contained in:
Daniel Baumann 2025-05-24 07:26:29 +02:00
parent e393c3af3f
commit 4978089aab
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
4963 changed files with 677545 additions and 0 deletions

View file

@ -0,0 +1,337 @@
# Couchbase Input Plugin
This plugin collects metrics from [Couchbase][couchbase], a distributed NoSQL
database. Metrics are collected for each node, as well as detailed metrics for
each bucket, for a given Couchbase server.
⭐ Telegraf v0.12.0
🏷️ server
💻 all
[couchbase]: https://www.couchbase.com/
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used to
modify metrics, tags, and fields or create aliases and configure ordering, etc.
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
## Configuration
```toml @sample.conf
# Read per-node and per-bucket metrics from Couchbase
[[inputs.couchbase]]
## specify servers via a url matching:
## [protocol://][:password]@address[:port]
## e.g.
## http://couchbase-0.example.com/
## http://admin:secret@couchbase-0.example.com:8091/
##
## If no servers are specified, then localhost is used as the host.
## If no protocol is specified, HTTP is used.
## If no port is specified, 8091 is used.
servers = ["http://localhost:8091"]
## Filter bucket fields to include only here.
# bucket_stats_included = ["quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"]
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification (defaults to false)
## If set to false, tls_cert and tls_key are required
# insecure_skip_verify = false
## Whether to collect cluster-wide bucket statistics
## It is recommended to disable this in favor of node_stats
## to get a better view of the cluster.
# cluster_bucket_stats = true
## Whether to collect bucket stats for each individual node
# node_bucket_stats = false
## List of additional stats to collect, choose from:
## * autofailover
# additional_stats = []
```
## Metrics
### couchbase_node
Tags:
- cluster: sanitized string from `servers` configuration field
e.g.: `http://user:password@couchbase-0.example.com:8091/endpoint` becomes
`http://couchbase-0.example.com:8091/endpoint`
- hostname: Couchbase's name for the node and port, e.g., `172.16.10.187:8091`
Fields:
- memory_free (unit: bytes, example: 23181365248.0)
- memory_total (unit: bytes, example: 64424656896.0)
### couchbase_autofailover
Tags:
- cluster: sanitized string from `servers` configuration field
e.g.: `http://user:password@couchbase-0.example.com:8091/endpoint` becomes
`http://couchbase-0.example.com:8091/endpoint`
Fields:
- count (unit: int, example: 1)
- enabled (unit: bool, example: true)
- max_count (unit: int, example: 2)
- timeout (unit: int, example: 72)
### couchbase_bucket and couchbase_node_bucket
Tags:
- cluster: whatever you called it in `servers` in the configuration,
e.g. `http://couchbase-0.example.com/`
- bucket: the name of the couchbase bucket, e.g., `blastro-df`
- hostname: the hostname of the node the bucket metrics were collected
from, e.g. `172.16.10.187:8091` (only present in `couchbase_node_bucket`)
Default bucket fields:
- quota_percent_used (unit: percent, example: 68.85424936294555)
- ops_per_sec (unit: count, example: 5686.789686789687)
- disk_fetches (unit: count, example: 0.0)
- item_count (unit: count, example: 943239752.0)
- disk_used (unit: bytes, example: 409178772321.0)
- data_used (unit: bytes, example: 212179309111.0)
- mem_used (unit: bytes, example: 202156957464.0)
Additional fields that can be configured with the `bucket_stats_included`
option:
- couch_total_disk_size
- couch_docs_fragmentation
- couch_views_fragmentation
- hit_ratio
- ep_cache_miss_rate
- ep_resident_items_rate
- vb_avg_active_queue_age
- vb_avg_replica_queue_age
- vb_avg_pending_queue_age
- vb_avg_total_queue_age
- vb_active_resident_items_ratio
- vb_replica_resident_items_ratio
- vb_pending_resident_items_ratio
- avg_disk_update_time
- avg_disk_commit_time
- avg_bg_wait_time
- avg_active_timestamp_drift
- avg_replica_timestamp_drift
- ep_dcp_views+indexes_count
- ep_dcp_views+indexes_items_remaining
- ep_dcp_views+indexes_producer_count
- ep_dcp_views+indexes_total_backlog_size
- ep_dcp_views+indexes_items_sent
- ep_dcp_views+indexes_total_bytes
- ep_dcp_views+indexes_backoff
- bg_wait_count
- bg_wait_total
- bytes_read
- bytes_written
- cas_badval
- cas_hits
- cas_misses
- cmd_get
- cmd_lookup
- cmd_set
- couch_docs_actual_disk_size
- couch_docs_data_size
- couch_docs_disk_size
- couch_spatial_data_size
- couch_spatial_disk_size
- couch_spatial_ops
- couch_views_actual_disk_size
- couch_views_data_size
- couch_views_disk_size
- couch_views_ops
- curr_connections
- curr_items
- curr_items_tot
- decr_hits
- decr_misses
- delete_hits
- delete_misses
- disk_commit_count
- disk_commit_total
- disk_update_count
- disk_update_total
- disk_write_queue
- ep_active_ahead_exceptions
- ep_active_hlc_drift
- ep_active_hlc_drift_count
- ep_bg_fetched
- ep_clock_cas_drift_threshold_exceeded
- ep_data_read_failed
- ep_data_write_failed
- ep_dcp_2i_backoff
- ep_dcp_2i_count
- ep_dcp_2i_items_remaining
- ep_dcp_2i_items_sent
- ep_dcp_2i_producer_count
- ep_dcp_2i_total_backlog_size
- ep_dcp_2i_total_bytes
- ep_dcp_cbas_backoff
- ep_dcp_cbas_count
- ep_dcp_cbas_items_remaining
- ep_dcp_cbas_items_sent
- ep_dcp_cbas_producer_count
- ep_dcp_cbas_total_backlog_size
- ep_dcp_cbas_total_bytes
- ep_dcp_eventing_backoff
- ep_dcp_eventing_count
- ep_dcp_eventing_items_remaining
- ep_dcp_eventing_items_sent
- ep_dcp_eventing_producer_count
- ep_dcp_eventing_total_backlog_size
- ep_dcp_eventing_total_bytes
- ep_dcp_fts_backoff
- ep_dcp_fts_count
- ep_dcp_fts_items_remaining
- ep_dcp_fts_items_sent
- ep_dcp_fts_producer_count
- ep_dcp_fts_total_backlog_size
- ep_dcp_fts_total_bytes
- ep_dcp_other_backoff
- ep_dcp_other_count
- ep_dcp_other_items_remaining
- ep_dcp_other_items_sent
- ep_dcp_other_producer_count
- ep_dcp_other_total_backlog_size
- ep_dcp_other_total_bytes
- ep_dcp_replica_backoff
- ep_dcp_replica_count
- ep_dcp_replica_items_remaining
- ep_dcp_replica_items_sent
- ep_dcp_replica_producer_count
- ep_dcp_replica_total_backlog_size
- ep_dcp_replica_total_bytes
- ep_dcp_views_backoff
- ep_dcp_views_count
- ep_dcp_views_items_remaining
- ep_dcp_views_items_sent
- ep_dcp_views_producer_count
- ep_dcp_views_total_backlog_size
- ep_dcp_views_total_bytes
- ep_dcp_xdcr_backoff
- ep_dcp_xdcr_count
- ep_dcp_xdcr_items_remaining
- ep_dcp_xdcr_items_sent
- ep_dcp_xdcr_producer_count
- ep_dcp_xdcr_total_backlog_size
- ep_dcp_xdcr_total_bytes
- ep_diskqueue_drain
- ep_diskqueue_fill
- ep_diskqueue_items
- ep_flusher_todo
- ep_item_commit_failed
- ep_kv_size
- ep_max_size
- ep_mem_high_wat
- ep_mem_low_wat
- ep_meta_data_memory
- ep_num_non_resident
- ep_num_ops_del_meta
- ep_num_ops_del_ret_meta
- ep_num_ops_get_meta
- ep_num_ops_set_meta
- ep_num_ops_set_ret_meta
- ep_num_value_ejects
- ep_oom_errors
- ep_ops_create
- ep_ops_update
- ep_overhead
- ep_queue_size
- ep_replica_ahead_exceptions
- ep_replica_hlc_drift
- ep_replica_hlc_drift_count
- ep_tmp_oom_errors
- ep_vb_total
- evictions
- get_hits
- get_misses
- incr_hits
- incr_misses
- mem_used
- misses
- ops
- timestamp
- vb_active_eject
- vb_active_itm_memory
- vb_active_meta_data_memory
- vb_active_num
- vb_active_num_non_resident
- vb_active_ops_create
- vb_active_ops_update
- vb_active_queue_age
- vb_active_queue_drain
- vb_active_queue_fill
- vb_active_queue_size
- vb_active_sync_write_aborted_count
- vb_active_sync_write_accepted_count
- vb_active_sync_write_committed_count
- vb_pending_curr_items
- vb_pending_eject
- vb_pending_itm_memory
- vb_pending_meta_data_memory
- vb_pending_num
- vb_pending_num_non_resident
- vb_pending_ops_create
- vb_pending_ops_update
- vb_pending_queue_age
- vb_pending_queue_drain
- vb_pending_queue_fill
- vb_pending_queue_size
- vb_replica_curr_items
- vb_replica_eject
- vb_replica_itm_memory
- vb_replica_meta_data_memory
- vb_replica_num
- vb_replica_num_non_resident
- vb_replica_ops_create
- vb_replica_ops_update
- vb_replica_queue_age
- vb_replica_queue_drain
- vb_replica_queue_fill
- vb_replica_queue_size
- vb_total_queue_age
- xdc_ops
- allocstall
- cpu_cores_available
- cpu_irq_rate
- cpu_stolen_rate
- cpu_sys_rate
- cpu_user_rate
- cpu_utilization_rate
- hibernated_requests
- hibernated_waked
- mem_actual_free
- mem_actual_used
- mem_free
- mem_limit
- mem_total
- mem_used_sys
- odp_report_failed
- rest_requests
- swap_total
- swap_used
## Example Output
```text
couchbase_node,cluster=http://localhost:8091/,hostname=172.17.0.2:8091 memory_free=7705575424,memory_total=16558182400 1547829754000000000
couchbase_bucket,bucket=beer-sample,cluster=http://localhost:8091/ quota_percent_used=27.09285736083984,ops_per_sec=0,disk_fetches=0,item_count=7303,disk_used=21662946,data_used=9325087,mem_used=28408920 1547829754000000000
```

View file

@ -0,0 +1,483 @@
//go:generate ../../../tools/readme_config_includer/generator
package couchbase
import (
_ "embed"
"encoding/json"
"fmt"
"net/http"
"regexp"
"sync"
"time"
"github.com/couchbase/go-couchbase"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/filter"
"github.com/influxdata/telegraf/internal/choice"
"github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
//go:embed sample.conf
var sampleConfig string
var regexpURI = regexp.MustCompile(`(\S+://)?(\S+\:\S+@)`)
// Couchbase is the plugin instance. It collects per-node and, optionally,
// per-bucket metrics from one or more Couchbase servers.
type Couchbase struct {
	// Servers lists cluster URLs to query; when empty, Gather falls back to
	// http://localhost:8091/.
	Servers []string `toml:"servers"`
	// BucketStatsIncluded selects which bucket fields are emitted; compiled
	// into bucketInclude by Init.
	BucketStatsIncluded []string `toml:"bucket_stats_included"`
	// ClusterBucketStats enables cluster-wide bucket statistics
	// (the couchbase_bucket measurement).
	ClusterBucketStats bool `toml:"cluster_bucket_stats"`
	// NodeBucketStats enables per-node bucket statistics
	// (the couchbase_node_bucket measurement).
	NodeBucketStats bool `toml:"node_bucket_stats"`
	// AdditionalStats lists extra stat groups to collect
	// (currently "autofailover" is checked for).
	AdditionalStats []string `toml:"additional_stats"`

	bucketInclude filter.Filter // include filter built from BucketStatsIncluded in Init
	client        *http.Client  // HTTP client for direct REST calls, built in Init

	tls.ClientConfig
}
// autoFailover mirrors the JSON payload returned by Couchbase's
// /settings/autoFailover endpoint (see gatherAutoFailoverStats).
type autoFailover struct {
	Count    int  `json:"count"`
	Enabled  bool `json:"enabled"`
	MaxCount int  `json:"maxCount"`
	Timeout  int  `json:"timeout"`
}
// SampleConfig returns the embedded sample configuration (sample.conf).
func (*Couchbase) SampleConfig() string {
	return sampleConfig
}
// Init validates the configuration: it compiles the bucket-field include
// filter, builds the plugin's HTTP client from the TLS settings, and pushes
// the same TLS settings into the go-couchbase library's package-level state.
func (cb *Couchbase) Init() error {
	bucketFilter, err := filter.NewIncludeExcludeFilter(cb.BucketStatsIncluded, nil)
	if err != nil {
		return err
	}
	cb.bucketInclude = bucketFilter

	tlsCfg, err := cb.TLSConfig()
	if err != nil {
		return err
	}

	transport := &http.Transport{
		MaxIdleConnsPerHost: couchbase.MaxIdleConnsPerHost,
		TLSClientConfig:     tlsCfg,
	}
	cb.client = &http.Client{
		Transport: transport,
		Timeout:   10 * time.Second,
	}

	// go-couchbase holds its TLS configuration in global package state.
	couchbase.SetSkipVerify(cb.ClientConfig.InsecureSkipVerify)
	couchbase.SetCertFile(cb.ClientConfig.TLSCert)
	couchbase.SetKeyFile(cb.ClientConfig.TLSKey)
	couchbase.SetRootFile(cb.ClientConfig.TLSCA)

	return nil
}
// Gather reads stats from every configured cluster and feeds them into the
// accumulator. With no servers configured it queries localhost directly and
// returns that error (if any); otherwise per-server errors are reported via
// acc.AddError and Gather itself returns nil.
func (cb *Couchbase) Gather(acc telegraf.Accumulator) error {
	if len(cb.Servers) == 0 {
		return cb.gatherServer(acc, "http://localhost:8091/")
	}

	var wg sync.WaitGroup
	wg.Add(len(cb.Servers))
	for _, server := range cb.Servers {
		// Each server is gathered concurrently.
		go func(addr string) {
			defer wg.Done()
			acc.AddError(cb.gatherServer(acc, addr))
		}(server)
	}
	wg.Wait()

	return nil
}
// gatherServer collects node, bucket and (optionally) autofailover metrics
// from a single cluster address and adds them to the accumulator. It returns
// the first error encountered.
func (cb *Couchbase) gatherServer(acc telegraf.Accumulator, addr string) error {
	// Strip any user:password credentials from the address; the sanitized
	// form is used as the "cluster" tag on every metric. (Previously this
	// identical regex replacement was computed twice, as escapedAddr and
	// cluster; compute it once.)
	cluster := regexpURI.ReplaceAllString(addr, "${1}")

	client, err := couchbase.Connect(addr)
	if err != nil {
		return err
	}

	// `default` is the only possible pool name. It's a
	// placeholder for a possible future Couchbase feature. See
	// http://stackoverflow.com/a/16990911/17498.
	pool, err := client.GetPool("default")
	if err != nil {
		return err
	}
	defer pool.Close()

	// Per-node memory statistics.
	for _, node := range pool.Nodes {
		tags := map[string]string{"cluster": cluster, "hostname": node.Hostname}
		fields := map[string]interface{}{
			"memory_free":  node.MemoryFree,
			"memory_total": node.MemoryTotal,
		}
		acc.AddFields("couchbase_node", fields, tags)
	}

	for name, bucket := range pool.BucketMap {
		if cb.ClusterBucketStats {
			fields := cb.basicBucketStats(bucket.BasicStats)
			tags := map[string]string{"cluster": cluster, "bucket": name}
			if err := cb.gatherDetailedBucketStats(addr, name, "", fields); err != nil {
				return err
			}
			acc.AddFields("couchbase_bucket", fields, tags)
		}

		if cb.NodeBucketStats {
			for _, node := range bucket.Nodes() {
				fields := cb.basicBucketStats(bucket.BasicStats)
				tags := map[string]string{"cluster": cluster, "bucket": name, "hostname": node.Hostname}
				if err := cb.gatherDetailedBucketStats(addr, name, node.Hostname, fields); err != nil {
					return err
				}
				acc.AddFields("couchbase_node_bucket", fields, tags)
			}
		}
	}

	if choice.Contains("autofailover", cb.AdditionalStats) {
		tags := map[string]string{"cluster": cluster}
		fields, err := cb.gatherAutoFailoverStats(addr)
		if err != nil {
			return fmt.Errorf("unable to collect autofailover settings: %w", err)
		}
		acc.AddFields("couchbase_autofailover", fields, tags)
	}

	return nil
}
// gatherAutoFailoverStats queries the cluster's /settings/autoFailover REST
// endpoint and returns its settings as metric fields ("count", "enabled",
// "max_count", "timeout"). On error a nil field map and the error are
// returned.
func (cb *Couchbase) gatherAutoFailoverStats(server string) (map[string]any, error) {
	var fields map[string]any

	url := server + "/settings/autoFailover"
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return fields, err
	}

	r, err := cb.client.Do(req)
	if err != nil {
		return fields, err
	}
	defer r.Body.Close()

	// Without this check a non-200 response (e.g. an auth failure or an HTML
	// error page) would be fed to the JSON decoder, producing zero-valued
	// stats or a misleading decode error. Fail explicitly instead.
	if r.StatusCode != http.StatusOK {
		return fields, fmt.Errorf("unexpected status code %d querying %q", r.StatusCode, url)
	}

	var stats autoFailover
	if err := json.NewDecoder(r.Body).Decode(&stats); err != nil {
		return fields, err
	}

	fields = map[string]any{
		"count":     stats.Count,
		"enabled":   stats.Enabled,
		"max_count": stats.MaxCount,
		"timeout":   stats.Timeout,
	}
	return fields, nil
}
// basicBucketStats converts the basic bucket statistics payload into metric
// fields, renaming Couchbase's camelCase keys to snake_case field names.
func (cb *Couchbase) basicBucketStats(basicStats map[string]interface{}) map[string]interface{} {
	// Output field name -> key in the basicStats payload.
	translation := []struct {
		field string
		stat  string
	}{
		{"quota_percent_used", "quotaPercentUsed"},
		{"ops_per_sec", "opsPerSec"},
		{"disk_fetches", "diskFetches"},
		{"item_count", "itemCount"},
		{"disk_used", "diskUsed"},
		{"data_used", "dataUsed"},
		{"mem_used", "memUsed"},
	}

	fields := make(map[string]interface{}, len(translation))
	for _, t := range translation {
		cb.addBucketField(fields, t.field, basicStats[t.stat])
	}
	return fields
}
func (cb *Couchbase) gatherDetailedBucketStats(server, bucket, nodeHostname string, fields map[string]interface{}) error {
extendedBucketStats := &bucketStats{}
err := cb.queryDetailedBucketStats(server, bucket, nodeHostname, extendedBucketStats)
if err != nil {
return err
}
cb.addBucketFieldChecked(fields, "couch_total_disk_size", extendedBucketStats.Op.Samples.CouchTotalDiskSize)
cb.addBucketFieldChecked(fields, "couch_docs_fragmentation", extendedBucketStats.Op.Samples.CouchDocsFragmentation)
cb.addBucketFieldChecked(fields, "couch_views_fragmentation", extendedBucketStats.Op.Samples.CouchViewsFragmentation)
cb.addBucketFieldChecked(fields, "hit_ratio", extendedBucketStats.Op.Samples.HitRatio)
cb.addBucketFieldChecked(fields, "ep_cache_miss_rate", extendedBucketStats.Op.Samples.EpCacheMissRate)
cb.addBucketFieldChecked(fields, "ep_resident_items_rate", extendedBucketStats.Op.Samples.EpResidentItemsRate)
cb.addBucketFieldChecked(fields, "vb_avg_active_queue_age", extendedBucketStats.Op.Samples.VbAvgActiveQueueAge)
cb.addBucketFieldChecked(fields, "vb_avg_replica_queue_age", extendedBucketStats.Op.Samples.VbAvgReplicaQueueAge)
cb.addBucketFieldChecked(fields, "vb_avg_pending_queue_age", extendedBucketStats.Op.Samples.VbAvgPendingQueueAge)
cb.addBucketFieldChecked(fields, "vb_avg_total_queue_age", extendedBucketStats.Op.Samples.VbAvgTotalQueueAge)
cb.addBucketFieldChecked(fields, "vb_active_resident_items_ratio", extendedBucketStats.Op.Samples.VbActiveResidentItemsRatio)
cb.addBucketFieldChecked(fields, "vb_replica_resident_items_ratio", extendedBucketStats.Op.Samples.VbReplicaResidentItemsRatio)
cb.addBucketFieldChecked(fields, "vb_pending_resident_items_ratio", extendedBucketStats.Op.Samples.VbPendingResidentItemsRatio)
cb.addBucketFieldChecked(fields, "avg_disk_update_time", extendedBucketStats.Op.Samples.AvgDiskUpdateTime)
cb.addBucketFieldChecked(fields, "avg_disk_commit_time", extendedBucketStats.Op.Samples.AvgDiskCommitTime)
cb.addBucketFieldChecked(fields, "avg_bg_wait_time", extendedBucketStats.Op.Samples.AvgBgWaitTime)
cb.addBucketFieldChecked(fields, "avg_active_timestamp_drift", extendedBucketStats.Op.Samples.AvgActiveTimestampDrift)
cb.addBucketFieldChecked(fields, "avg_replica_timestamp_drift", extendedBucketStats.Op.Samples.AvgReplicaTimestampDrift)
cb.addBucketFieldChecked(fields, "ep_dcp_views+indexes_count", extendedBucketStats.Op.Samples.EpDcpViewsIndexesCount)
cb.addBucketFieldChecked(fields, "ep_dcp_views+indexes_items_remaining", extendedBucketStats.Op.Samples.EpDcpViewsIndexesItemsRemaining)
cb.addBucketFieldChecked(fields, "ep_dcp_views+indexes_producer_count", extendedBucketStats.Op.Samples.EpDcpViewsIndexesProducerCount)
cb.addBucketFieldChecked(fields, "ep_dcp_views+indexes_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpViewsIndexesTotalBacklogSize)
cb.addBucketFieldChecked(fields, "ep_dcp_views+indexes_items_sent", extendedBucketStats.Op.Samples.EpDcpViewsIndexesItemsSent)
cb.addBucketFieldChecked(fields, "ep_dcp_views+indexes_total_bytes", extendedBucketStats.Op.Samples.EpDcpViewsIndexesTotalBytes)
cb.addBucketFieldChecked(fields, "ep_dcp_views+indexes_backoff", extendedBucketStats.Op.Samples.EpDcpViewsIndexesBackoff)
cb.addBucketFieldChecked(fields, "bg_wait_count", extendedBucketStats.Op.Samples.BgWaitCount)
cb.addBucketFieldChecked(fields, "bg_wait_total", extendedBucketStats.Op.Samples.BgWaitTotal)
cb.addBucketFieldChecked(fields, "bytes_read", extendedBucketStats.Op.Samples.BytesRead)
cb.addBucketFieldChecked(fields, "bytes_written", extendedBucketStats.Op.Samples.BytesWritten)
cb.addBucketFieldChecked(fields, "cas_badval", extendedBucketStats.Op.Samples.CasBadval)
cb.addBucketFieldChecked(fields, "cas_hits", extendedBucketStats.Op.Samples.CasHits)
cb.addBucketFieldChecked(fields, "cas_misses", extendedBucketStats.Op.Samples.CasMisses)
cb.addBucketFieldChecked(fields, "cmd_get", extendedBucketStats.Op.Samples.CmdGet)
cb.addBucketFieldChecked(fields, "cmd_lookup", extendedBucketStats.Op.Samples.CmdLookup)
cb.addBucketFieldChecked(fields, "cmd_set", extendedBucketStats.Op.Samples.CmdSet)
cb.addBucketFieldChecked(fields, "couch_docs_actual_disk_size", extendedBucketStats.Op.Samples.CouchDocsActualDiskSize)
cb.addBucketFieldChecked(fields, "couch_docs_data_size", extendedBucketStats.Op.Samples.CouchDocsDataSize)
cb.addBucketFieldChecked(fields, "couch_docs_disk_size", extendedBucketStats.Op.Samples.CouchDocsDiskSize)
cb.addBucketFieldChecked(fields, "couch_spatial_data_size", extendedBucketStats.Op.Samples.CouchSpatialDataSize)
cb.addBucketFieldChecked(fields, "couch_spatial_disk_size", extendedBucketStats.Op.Samples.CouchSpatialDiskSize)
cb.addBucketFieldChecked(fields, "couch_spatial_ops", extendedBucketStats.Op.Samples.CouchSpatialOps)
cb.addBucketFieldChecked(fields, "couch_views_actual_disk_size", extendedBucketStats.Op.Samples.CouchViewsActualDiskSize)
cb.addBucketFieldChecked(fields, "couch_views_data_size", extendedBucketStats.Op.Samples.CouchViewsDataSize)
cb.addBucketFieldChecked(fields, "couch_views_disk_size", extendedBucketStats.Op.Samples.CouchViewsDiskSize)
cb.addBucketFieldChecked(fields, "couch_views_ops", extendedBucketStats.Op.Samples.CouchViewsOps)
cb.addBucketFieldChecked(fields, "curr_connections", extendedBucketStats.Op.Samples.CurrConnections)
cb.addBucketFieldChecked(fields, "curr_items", extendedBucketStats.Op.Samples.CurrItems)
cb.addBucketFieldChecked(fields, "curr_items_tot", extendedBucketStats.Op.Samples.CurrItemsTot)
cb.addBucketFieldChecked(fields, "decr_hits", extendedBucketStats.Op.Samples.DecrHits)
cb.addBucketFieldChecked(fields, "decr_misses", extendedBucketStats.Op.Samples.DecrMisses)
cb.addBucketFieldChecked(fields, "delete_hits", extendedBucketStats.Op.Samples.DeleteHits)
cb.addBucketFieldChecked(fields, "delete_misses", extendedBucketStats.Op.Samples.DeleteMisses)
cb.addBucketFieldChecked(fields, "disk_commit_count", extendedBucketStats.Op.Samples.DiskCommitCount)
cb.addBucketFieldChecked(fields, "disk_commit_total", extendedBucketStats.Op.Samples.DiskCommitTotal)
cb.addBucketFieldChecked(fields, "disk_update_count", extendedBucketStats.Op.Samples.DiskUpdateCount)
cb.addBucketFieldChecked(fields, "disk_update_total", extendedBucketStats.Op.Samples.DiskUpdateTotal)
cb.addBucketFieldChecked(fields, "disk_write_queue", extendedBucketStats.Op.Samples.DiskWriteQueue)
cb.addBucketFieldChecked(fields, "ep_active_ahead_exceptions", extendedBucketStats.Op.Samples.EpActiveAheadExceptions)
cb.addBucketFieldChecked(fields, "ep_active_hlc_drift", extendedBucketStats.Op.Samples.EpActiveHlcDrift)
cb.addBucketFieldChecked(fields, "ep_active_hlc_drift_count", extendedBucketStats.Op.Samples.EpActiveHlcDriftCount)
cb.addBucketFieldChecked(fields, "ep_bg_fetched", extendedBucketStats.Op.Samples.EpBgFetched)
cb.addBucketFieldChecked(fields, "ep_clock_cas_drift_threshold_exceeded", extendedBucketStats.Op.Samples.EpClockCasDriftThresholdExceeded)
cb.addBucketFieldChecked(fields, "ep_data_read_failed", extendedBucketStats.Op.Samples.EpDataReadFailed)
cb.addBucketFieldChecked(fields, "ep_data_write_failed", extendedBucketStats.Op.Samples.EpDataWriteFailed)
cb.addBucketFieldChecked(fields, "ep_dcp_2i_backoff", extendedBucketStats.Op.Samples.EpDcp2IBackoff)
cb.addBucketFieldChecked(fields, "ep_dcp_2i_count", extendedBucketStats.Op.Samples.EpDcp2ICount)
cb.addBucketFieldChecked(fields, "ep_dcp_2i_items_remaining", extendedBucketStats.Op.Samples.EpDcp2IItemsRemaining)
cb.addBucketFieldChecked(fields, "ep_dcp_2i_items_sent", extendedBucketStats.Op.Samples.EpDcp2IItemsSent)
cb.addBucketFieldChecked(fields, "ep_dcp_2i_producer_count", extendedBucketStats.Op.Samples.EpDcp2IProducerCount)
cb.addBucketFieldChecked(fields, "ep_dcp_2i_total_backlog_size", extendedBucketStats.Op.Samples.EpDcp2ITotalBacklogSize)
cb.addBucketFieldChecked(fields, "ep_dcp_2i_total_bytes", extendedBucketStats.Op.Samples.EpDcp2ITotalBytes)
cb.addBucketFieldChecked(fields, "ep_dcp_cbas_backoff", extendedBucketStats.Op.Samples.EpDcpCbasBackoff)
cb.addBucketFieldChecked(fields, "ep_dcp_cbas_count", extendedBucketStats.Op.Samples.EpDcpCbasCount)
cb.addBucketFieldChecked(fields, "ep_dcp_cbas_items_remaining", extendedBucketStats.Op.Samples.EpDcpCbasItemsRemaining)
cb.addBucketFieldChecked(fields, "ep_dcp_cbas_items_sent", extendedBucketStats.Op.Samples.EpDcpCbasItemsSent)
cb.addBucketFieldChecked(fields, "ep_dcp_cbas_producer_count", extendedBucketStats.Op.Samples.EpDcpCbasProducerCount)
cb.addBucketFieldChecked(fields, "ep_dcp_cbas_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpCbasTotalBacklogSize)
cb.addBucketFieldChecked(fields, "ep_dcp_cbas_total_bytes", extendedBucketStats.Op.Samples.EpDcpCbasTotalBytes)
cb.addBucketFieldChecked(fields, "ep_dcp_eventing_backoff", extendedBucketStats.Op.Samples.EpDcpEventingBackoff)
cb.addBucketFieldChecked(fields, "ep_dcp_eventing_count", extendedBucketStats.Op.Samples.EpDcpEventingCount)
cb.addBucketFieldChecked(fields, "ep_dcp_eventing_items_remaining", extendedBucketStats.Op.Samples.EpDcpEventingItemsRemaining)
cb.addBucketFieldChecked(fields, "ep_dcp_eventing_items_sent", extendedBucketStats.Op.Samples.EpDcpEventingItemsSent)
cb.addBucketFieldChecked(fields, "ep_dcp_eventing_producer_count", extendedBucketStats.Op.Samples.EpDcpEventingProducerCount)
cb.addBucketFieldChecked(fields, "ep_dcp_eventing_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpEventingTotalBacklogSize)
cb.addBucketFieldChecked(fields, "ep_dcp_eventing_total_bytes", extendedBucketStats.Op.Samples.EpDcpEventingTotalBytes)
cb.addBucketFieldChecked(fields, "ep_dcp_fts_backoff", extendedBucketStats.Op.Samples.EpDcpFtsBackoff)
cb.addBucketFieldChecked(fields, "ep_dcp_fts_count", extendedBucketStats.Op.Samples.EpDcpFtsCount)
cb.addBucketFieldChecked(fields, "ep_dcp_fts_items_remaining", extendedBucketStats.Op.Samples.EpDcpFtsItemsRemaining)
cb.addBucketFieldChecked(fields, "ep_dcp_fts_items_sent", extendedBucketStats.Op.Samples.EpDcpFtsItemsSent)
cb.addBucketFieldChecked(fields, "ep_dcp_fts_producer_count", extendedBucketStats.Op.Samples.EpDcpFtsProducerCount)
cb.addBucketFieldChecked(fields, "ep_dcp_fts_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpFtsTotalBacklogSize)
cb.addBucketFieldChecked(fields, "ep_dcp_fts_total_bytes", extendedBucketStats.Op.Samples.EpDcpFtsTotalBytes)
cb.addBucketFieldChecked(fields, "ep_dcp_other_backoff", extendedBucketStats.Op.Samples.EpDcpOtherBackoff)
cb.addBucketFieldChecked(fields, "ep_dcp_other_count", extendedBucketStats.Op.Samples.EpDcpOtherCount)
cb.addBucketFieldChecked(fields, "ep_dcp_other_items_remaining", extendedBucketStats.Op.Samples.EpDcpOtherItemsRemaining)
cb.addBucketFieldChecked(fields, "ep_dcp_other_items_sent", extendedBucketStats.Op.Samples.EpDcpOtherItemsSent)
cb.addBucketFieldChecked(fields, "ep_dcp_other_producer_count", extendedBucketStats.Op.Samples.EpDcpOtherProducerCount)
cb.addBucketFieldChecked(fields, "ep_dcp_other_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpOtherTotalBacklogSize)
cb.addBucketFieldChecked(fields, "ep_dcp_other_total_bytes", extendedBucketStats.Op.Samples.EpDcpOtherTotalBytes)
cb.addBucketFieldChecked(fields, "ep_dcp_replica_backoff", extendedBucketStats.Op.Samples.EpDcpReplicaBackoff)
cb.addBucketFieldChecked(fields, "ep_dcp_replica_count", extendedBucketStats.Op.Samples.EpDcpReplicaCount)
cb.addBucketFieldChecked(fields, "ep_dcp_replica_items_remaining", extendedBucketStats.Op.Samples.EpDcpReplicaItemsRemaining)
cb.addBucketFieldChecked(fields, "ep_dcp_replica_items_sent", extendedBucketStats.Op.Samples.EpDcpReplicaItemsSent)
cb.addBucketFieldChecked(fields, "ep_dcp_replica_producer_count", extendedBucketStats.Op.Samples.EpDcpReplicaProducerCount)
cb.addBucketFieldChecked(fields, "ep_dcp_replica_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpReplicaTotalBacklogSize)
cb.addBucketFieldChecked(fields, "ep_dcp_replica_total_bytes", extendedBucketStats.Op.Samples.EpDcpReplicaTotalBytes)
cb.addBucketFieldChecked(fields, "ep_dcp_views_backoff", extendedBucketStats.Op.Samples.EpDcpViewsBackoff)
cb.addBucketFieldChecked(fields, "ep_dcp_views_count", extendedBucketStats.Op.Samples.EpDcpViewsCount)
cb.addBucketFieldChecked(fields, "ep_dcp_views_items_remaining", extendedBucketStats.Op.Samples.EpDcpViewsItemsRemaining)
cb.addBucketFieldChecked(fields, "ep_dcp_views_items_sent", extendedBucketStats.Op.Samples.EpDcpViewsItemsSent)
cb.addBucketFieldChecked(fields, "ep_dcp_views_producer_count", extendedBucketStats.Op.Samples.EpDcpViewsProducerCount)
cb.addBucketFieldChecked(fields, "ep_dcp_views_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpViewsTotalBacklogSize)
cb.addBucketFieldChecked(fields, "ep_dcp_views_total_bytes", extendedBucketStats.Op.Samples.EpDcpViewsTotalBytes)
cb.addBucketFieldChecked(fields, "ep_dcp_xdcr_backoff", extendedBucketStats.Op.Samples.EpDcpXdcrBackoff)
cb.addBucketFieldChecked(fields, "ep_dcp_xdcr_count", extendedBucketStats.Op.Samples.EpDcpXdcrCount)
cb.addBucketFieldChecked(fields, "ep_dcp_xdcr_items_remaining", extendedBucketStats.Op.Samples.EpDcpXdcrItemsRemaining)
cb.addBucketFieldChecked(fields, "ep_dcp_xdcr_items_sent", extendedBucketStats.Op.Samples.EpDcpXdcrItemsSent)
cb.addBucketFieldChecked(fields, "ep_dcp_xdcr_producer_count", extendedBucketStats.Op.Samples.EpDcpXdcrProducerCount)
cb.addBucketFieldChecked(fields, "ep_dcp_xdcr_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpXdcrTotalBacklogSize)
cb.addBucketFieldChecked(fields, "ep_dcp_xdcr_total_bytes", extendedBucketStats.Op.Samples.EpDcpXdcrTotalBytes)
cb.addBucketFieldChecked(fields, "ep_diskqueue_drain", extendedBucketStats.Op.Samples.EpDiskqueueDrain)
cb.addBucketFieldChecked(fields, "ep_diskqueue_fill", extendedBucketStats.Op.Samples.EpDiskqueueFill)
cb.addBucketFieldChecked(fields, "ep_diskqueue_items", extendedBucketStats.Op.Samples.EpDiskqueueItems)
cb.addBucketFieldChecked(fields, "ep_flusher_todo", extendedBucketStats.Op.Samples.EpFlusherTodo)
cb.addBucketFieldChecked(fields, "ep_item_commit_failed", extendedBucketStats.Op.Samples.EpItemCommitFailed)
cb.addBucketFieldChecked(fields, "ep_kv_size", extendedBucketStats.Op.Samples.EpKvSize)
cb.addBucketFieldChecked(fields, "ep_max_size", extendedBucketStats.Op.Samples.EpMaxSize)
cb.addBucketFieldChecked(fields, "ep_mem_high_wat", extendedBucketStats.Op.Samples.EpMemHighWat)
cb.addBucketFieldChecked(fields, "ep_mem_low_wat", extendedBucketStats.Op.Samples.EpMemLowWat)
cb.addBucketFieldChecked(fields, "ep_meta_data_memory", extendedBucketStats.Op.Samples.EpMetaDataMemory)
cb.addBucketFieldChecked(fields, "ep_num_non_resident", extendedBucketStats.Op.Samples.EpNumNonResident)
cb.addBucketFieldChecked(fields, "ep_num_ops_del_meta", extendedBucketStats.Op.Samples.EpNumOpsDelMeta)
cb.addBucketFieldChecked(fields, "ep_num_ops_del_ret_meta", extendedBucketStats.Op.Samples.EpNumOpsDelRetMeta)
cb.addBucketFieldChecked(fields, "ep_num_ops_get_meta", extendedBucketStats.Op.Samples.EpNumOpsGetMeta)
cb.addBucketFieldChecked(fields, "ep_num_ops_set_meta", extendedBucketStats.Op.Samples.EpNumOpsSetMeta)
cb.addBucketFieldChecked(fields, "ep_num_ops_set_ret_meta", extendedBucketStats.Op.Samples.EpNumOpsSetRetMeta)
cb.addBucketFieldChecked(fields, "ep_num_value_ejects", extendedBucketStats.Op.Samples.EpNumValueEjects)
cb.addBucketFieldChecked(fields, "ep_oom_errors", extendedBucketStats.Op.Samples.EpOomErrors)
cb.addBucketFieldChecked(fields, "ep_ops_create", extendedBucketStats.Op.Samples.EpOpsCreate)
cb.addBucketFieldChecked(fields, "ep_ops_update", extendedBucketStats.Op.Samples.EpOpsUpdate)
cb.addBucketFieldChecked(fields, "ep_overhead", extendedBucketStats.Op.Samples.EpOverhead)
cb.addBucketFieldChecked(fields, "ep_queue_size", extendedBucketStats.Op.Samples.EpQueueSize)
cb.addBucketFieldChecked(fields, "ep_replica_ahead_exceptions", extendedBucketStats.Op.Samples.EpReplicaAheadExceptions)
cb.addBucketFieldChecked(fields, "ep_replica_hlc_drift", extendedBucketStats.Op.Samples.EpReplicaHlcDrift)
cb.addBucketFieldChecked(fields, "ep_replica_hlc_drift_count", extendedBucketStats.Op.Samples.EpReplicaHlcDriftCount)
cb.addBucketFieldChecked(fields, "ep_tmp_oom_errors", extendedBucketStats.Op.Samples.EpTmpOomErrors)
cb.addBucketFieldChecked(fields, "ep_vb_total", extendedBucketStats.Op.Samples.EpVbTotal)
cb.addBucketFieldChecked(fields, "evictions", extendedBucketStats.Op.Samples.Evictions)
cb.addBucketFieldChecked(fields, "get_hits", extendedBucketStats.Op.Samples.GetHits)
cb.addBucketFieldChecked(fields, "get_misses", extendedBucketStats.Op.Samples.GetMisses)
cb.addBucketFieldChecked(fields, "incr_hits", extendedBucketStats.Op.Samples.IncrHits)
cb.addBucketFieldChecked(fields, "incr_misses", extendedBucketStats.Op.Samples.IncrMisses)
cb.addBucketFieldChecked(fields, "misses", extendedBucketStats.Op.Samples.Misses)
cb.addBucketFieldChecked(fields, "ops", extendedBucketStats.Op.Samples.Ops)
cb.addBucketFieldChecked(fields, "timestamp", extendedBucketStats.Op.Samples.Timestamp)
cb.addBucketFieldChecked(fields, "vb_active_eject", extendedBucketStats.Op.Samples.VbActiveEject)
cb.addBucketFieldChecked(fields, "vb_active_itm_memory", extendedBucketStats.Op.Samples.VbActiveItmMemory)
cb.addBucketFieldChecked(fields, "vb_active_meta_data_memory", extendedBucketStats.Op.Samples.VbActiveMetaDataMemory)
cb.addBucketFieldChecked(fields, "vb_active_num", extendedBucketStats.Op.Samples.VbActiveNum)
cb.addBucketFieldChecked(fields, "vb_active_num_non_resident", extendedBucketStats.Op.Samples.VbActiveNumNonResident)
cb.addBucketFieldChecked(fields, "vb_active_ops_create", extendedBucketStats.Op.Samples.VbActiveOpsCreate)
cb.addBucketFieldChecked(fields, "vb_active_ops_update", extendedBucketStats.Op.Samples.VbActiveOpsUpdate)
cb.addBucketFieldChecked(fields, "vb_active_queue_age", extendedBucketStats.Op.Samples.VbActiveQueueAge)
cb.addBucketFieldChecked(fields, "vb_active_queue_drain", extendedBucketStats.Op.Samples.VbActiveQueueDrain)
cb.addBucketFieldChecked(fields, "vb_active_queue_fill", extendedBucketStats.Op.Samples.VbActiveQueueFill)
cb.addBucketFieldChecked(fields, "vb_active_queue_size", extendedBucketStats.Op.Samples.VbActiveQueueSize)
cb.addBucketFieldChecked(fields, "vb_active_sync_write_aborted_count", extendedBucketStats.Op.Samples.VbActiveSyncWriteAbortedCount)
cb.addBucketFieldChecked(fields, "vb_active_sync_write_accepted_count", extendedBucketStats.Op.Samples.VbActiveSyncWriteAcceptedCount)
cb.addBucketFieldChecked(fields, "vb_active_sync_write_committed_count", extendedBucketStats.Op.Samples.VbActiveSyncWriteCommittedCount)
cb.addBucketFieldChecked(fields, "vb_pending_curr_items", extendedBucketStats.Op.Samples.VbPendingCurrItems)
cb.addBucketFieldChecked(fields, "vb_pending_eject", extendedBucketStats.Op.Samples.VbPendingEject)
cb.addBucketFieldChecked(fields, "vb_pending_itm_memory", extendedBucketStats.Op.Samples.VbPendingItmMemory)
cb.addBucketFieldChecked(fields, "vb_pending_meta_data_memory", extendedBucketStats.Op.Samples.VbPendingMetaDataMemory)
cb.addBucketFieldChecked(fields, "vb_pending_num", extendedBucketStats.Op.Samples.VbPendingNum)
cb.addBucketFieldChecked(fields, "vb_pending_num_non_resident", extendedBucketStats.Op.Samples.VbPendingNumNonResident)
cb.addBucketFieldChecked(fields, "vb_pending_ops_create", extendedBucketStats.Op.Samples.VbPendingOpsCreate)
cb.addBucketFieldChecked(fields, "vb_pending_ops_update", extendedBucketStats.Op.Samples.VbPendingOpsUpdate)
cb.addBucketFieldChecked(fields, "vb_pending_queue_age", extendedBucketStats.Op.Samples.VbPendingQueueAge)
cb.addBucketFieldChecked(fields, "vb_pending_queue_drain", extendedBucketStats.Op.Samples.VbPendingQueueDrain)
cb.addBucketFieldChecked(fields, "vb_pending_queue_fill", extendedBucketStats.Op.Samples.VbPendingQueueFill)
cb.addBucketFieldChecked(fields, "vb_pending_queue_size", extendedBucketStats.Op.Samples.VbPendingQueueSize)
cb.addBucketFieldChecked(fields, "vb_replica_curr_items", extendedBucketStats.Op.Samples.VbReplicaCurrItems)
cb.addBucketFieldChecked(fields, "vb_replica_eject", extendedBucketStats.Op.Samples.VbReplicaEject)
cb.addBucketFieldChecked(fields, "vb_replica_itm_memory", extendedBucketStats.Op.Samples.VbReplicaItmMemory)
cb.addBucketFieldChecked(fields, "vb_replica_meta_data_memory", extendedBucketStats.Op.Samples.VbReplicaMetaDataMemory)
cb.addBucketFieldChecked(fields, "vb_replica_num", extendedBucketStats.Op.Samples.VbReplicaNum)
cb.addBucketFieldChecked(fields, "vb_replica_num_non_resident", extendedBucketStats.Op.Samples.VbReplicaNumNonResident)
cb.addBucketFieldChecked(fields, "vb_replica_ops_create", extendedBucketStats.Op.Samples.VbReplicaOpsCreate)
cb.addBucketFieldChecked(fields, "vb_replica_ops_update", extendedBucketStats.Op.Samples.VbReplicaOpsUpdate)
cb.addBucketFieldChecked(fields, "vb_replica_queue_age", extendedBucketStats.Op.Samples.VbReplicaQueueAge)
cb.addBucketFieldChecked(fields, "vb_replica_queue_drain", extendedBucketStats.Op.Samples.VbReplicaQueueDrain)
cb.addBucketFieldChecked(fields, "vb_replica_queue_fill", extendedBucketStats.Op.Samples.VbReplicaQueueFill)
cb.addBucketFieldChecked(fields, "vb_replica_queue_size", extendedBucketStats.Op.Samples.VbReplicaQueueSize)
cb.addBucketFieldChecked(fields, "vb_total_queue_age", extendedBucketStats.Op.Samples.VbTotalQueueAge)
cb.addBucketFieldChecked(fields, "xdc_ops", extendedBucketStats.Op.Samples.XdcOps)
cb.addBucketFieldChecked(fields, "allocstall", extendedBucketStats.Op.Samples.Allocstall)
cb.addBucketFieldChecked(fields, "cpu_cores_available", extendedBucketStats.Op.Samples.CPUCoresAvailable)
cb.addBucketFieldChecked(fields, "cpu_irq_rate", extendedBucketStats.Op.Samples.CPUIrqRate)
cb.addBucketFieldChecked(fields, "cpu_stolen_rate", extendedBucketStats.Op.Samples.CPUStolenRate)
cb.addBucketFieldChecked(fields, "cpu_sys_rate", extendedBucketStats.Op.Samples.CPUSysRate)
cb.addBucketFieldChecked(fields, "cpu_user_rate", extendedBucketStats.Op.Samples.CPUUserRate)
cb.addBucketFieldChecked(fields, "cpu_utilization_rate", extendedBucketStats.Op.Samples.CPUUtilizationRate)
cb.addBucketFieldChecked(fields, "hibernated_requests", extendedBucketStats.Op.Samples.HibernatedRequests)
cb.addBucketFieldChecked(fields, "hibernated_waked", extendedBucketStats.Op.Samples.HibernatedWaked)
cb.addBucketFieldChecked(fields, "mem_actual_free", extendedBucketStats.Op.Samples.MemActualFree)
cb.addBucketFieldChecked(fields, "mem_actual_used", extendedBucketStats.Op.Samples.MemActualUsed)
cb.addBucketFieldChecked(fields, "mem_free", extendedBucketStats.Op.Samples.MemFree)
cb.addBucketFieldChecked(fields, "mem_limit", extendedBucketStats.Op.Samples.MemLimit)
cb.addBucketFieldChecked(fields, "mem_total", extendedBucketStats.Op.Samples.MemTotal)
cb.addBucketFieldChecked(fields, "mem_used_sys", extendedBucketStats.Op.Samples.MemUsedSys)
cb.addBucketFieldChecked(fields, "odp_report_failed", extendedBucketStats.Op.Samples.OdpReportFailed)
cb.addBucketFieldChecked(fields, "rest_requests", extendedBucketStats.Op.Samples.RestRequests)
cb.addBucketFieldChecked(fields, "swap_total", extendedBucketStats.Op.Samples.SwapTotal)
cb.addBucketFieldChecked(fields, "swap_used", extendedBucketStats.Op.Samples.SwapUsed)
return nil
}
// addBucketField stores value under fieldKey in the fields map, but only
// when the configured bucket-stats field filter accepts the field name.
func (cb *Couchbase) addBucketField(fields map[string]interface{}, fieldKey string, value interface{}) {
	if cb.bucketInclude.Match(fieldKey) {
		fields[fieldKey] = value
	}
}
// addBucketFieldChecked stores the most recent sample (the last element of
// values) under fieldKey, skipping fields for which the server returned no
// samples. Guarding on length rather than nil avoids an index-out-of-range
// panic when the server returns an empty, non-nil sample array.
func (cb *Couchbase) addBucketFieldChecked(fields map[string]interface{}, fieldKey string, values []float64) {
	if len(values) == 0 {
		return
	}
	cb.addBucketField(fields, fieldKey, values[len(values)-1])
}
// queryDetailedBucketStats fetches the complete set of statistics for bucket
// from server and decodes the JSON response into bucketStats. When
// nodeHostname is non-empty, the node-scoped endpoint is queried instead of
// the cluster-wide one.
func (cb *Couchbase) queryDetailedBucketStats(server, bucket, nodeHostname string, bucketStats *bucketStats) error {
	endpoint := server + "/pools/default/buckets/" + bucket
	if nodeHostname != "" {
		endpoint += "/nodes/" + nodeHostname
	}
	endpoint += "/stats?"

	req, err := http.NewRequest("GET", endpoint, nil)
	if err != nil {
		return err
	}

	resp, err := cb.client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	return json.NewDecoder(resp.Body).Decode(bucketStats)
}
// init registers this plugin with Telegraf under the name "couchbase",
// pre-populating the default bucket field filter (matching the documented
// sample configuration) and enabling cluster-level bucket statistics.
func init() {
	inputs.Add("couchbase", func() telegraf.Input {
		return &Couchbase{
			BucketStatsIncluded: []string{"quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"},
			ClusterBucketStats: true,
		}
	})
}

View file

@ -0,0 +1,228 @@
package couchbase
// bucketStats mirrors the JSON document returned by Couchbase's
// /pools/default/buckets/<bucket>/stats endpoint (optionally node-scoped).
// Every field under Op.Samples is a time series of samples; the plugin
// reports only the most recent (last) element of each slice.
type bucketStats struct {
	Op struct {
		// Samples holds one slice per exported statistic, keyed by the
		// server's metric name in the JSON tag.
		Samples struct {
			CouchTotalDiskSize []float64 `json:"couch_total_disk_size"`
			CouchDocsFragmentation []float64 `json:"couch_docs_fragmentation"`
			CouchViewsFragmentation []float64 `json:"couch_views_fragmentation"`
			HitRatio []float64 `json:"hit_ratio"`
			EpCacheMissRate []float64 `json:"ep_cache_miss_rate"`
			EpResidentItemsRate []float64 `json:"ep_resident_items_rate"`
			VbAvgActiveQueueAge []float64 `json:"vb_avg_active_queue_age"`
			VbAvgReplicaQueueAge []float64 `json:"vb_avg_replica_queue_age"`
			VbAvgPendingQueueAge []float64 `json:"vb_avg_pending_queue_age"`
			VbAvgTotalQueueAge []float64 `json:"vb_avg_total_queue_age"`
			VbActiveResidentItemsRatio []float64 `json:"vb_active_resident_items_ratio"`
			VbReplicaResidentItemsRatio []float64 `json:"vb_replica_resident_items_ratio"`
			VbPendingResidentItemsRatio []float64 `json:"vb_pending_resident_items_ratio"`
			AvgDiskUpdateTime []float64 `json:"avg_disk_update_time"`
			AvgDiskCommitTime []float64 `json:"avg_disk_commit_time"`
			AvgBgWaitTime []float64 `json:"avg_bg_wait_time"`
			AvgActiveTimestampDrift []float64 `json:"avg_active_timestamp_drift"`
			AvgReplicaTimestampDrift []float64 `json:"avg_replica_timestamp_drift"`
			// NOTE: the following tags really do contain a literal '+'
			// ("views+indexes") — that is the server's metric name.
			EpDcpViewsIndexesCount []float64 `json:"ep_dcp_views+indexes_count"`
			EpDcpViewsIndexesItemsRemaining []float64 `json:"ep_dcp_views+indexes_items_remaining"`
			EpDcpViewsIndexesProducerCount []float64 `json:"ep_dcp_views+indexes_producer_count"`
			EpDcpViewsIndexesTotalBacklogSize []float64 `json:"ep_dcp_views+indexes_total_backlog_size"`
			EpDcpViewsIndexesItemsSent []float64 `json:"ep_dcp_views+indexes_items_sent"`
			EpDcpViewsIndexesTotalBytes []float64 `json:"ep_dcp_views+indexes_total_bytes"`
			EpDcpViewsIndexesBackoff []float64 `json:"ep_dcp_views+indexes_backoff"`
			BgWaitCount []float64 `json:"bg_wait_count"`
			BgWaitTotal []float64 `json:"bg_wait_total"`
			BytesRead []float64 `json:"bytes_read"`
			BytesWritten []float64 `json:"bytes_written"`
			CasBadval []float64 `json:"cas_badval"`
			CasHits []float64 `json:"cas_hits"`
			CasMisses []float64 `json:"cas_misses"`
			CmdGet []float64 `json:"cmd_get"`
			CmdLookup []float64 `json:"cmd_lookup"`
			CmdSet []float64 `json:"cmd_set"`
			CouchDocsActualDiskSize []float64 `json:"couch_docs_actual_disk_size"`
			CouchDocsDataSize []float64 `json:"couch_docs_data_size"`
			CouchDocsDiskSize []float64 `json:"couch_docs_disk_size"`
			CouchSpatialDataSize []float64 `json:"couch_spatial_data_size"`
			CouchSpatialDiskSize []float64 `json:"couch_spatial_disk_size"`
			CouchSpatialOps []float64 `json:"couch_spatial_ops"`
			CouchViewsActualDiskSize []float64 `json:"couch_views_actual_disk_size"`
			CouchViewsDataSize []float64 `json:"couch_views_data_size"`
			CouchViewsDiskSize []float64 `json:"couch_views_disk_size"`
			CouchViewsOps []float64 `json:"couch_views_ops"`
			CurrConnections []float64 `json:"curr_connections"`
			CurrItems []float64 `json:"curr_items"`
			CurrItemsTot []float64 `json:"curr_items_tot"`
			DecrHits []float64 `json:"decr_hits"`
			DecrMisses []float64 `json:"decr_misses"`
			DeleteHits []float64 `json:"delete_hits"`
			DeleteMisses []float64 `json:"delete_misses"`
			DiskCommitCount []float64 `json:"disk_commit_count"`
			DiskCommitTotal []float64 `json:"disk_commit_total"`
			DiskUpdateCount []float64 `json:"disk_update_count"`
			DiskUpdateTotal []float64 `json:"disk_update_total"`
			DiskWriteQueue []float64 `json:"disk_write_queue"`
			EpActiveAheadExceptions []float64 `json:"ep_active_ahead_exceptions"`
			EpActiveHlcDrift []float64 `json:"ep_active_hlc_drift"`
			EpActiveHlcDriftCount []float64 `json:"ep_active_hlc_drift_count"`
			EpBgFetched []float64 `json:"ep_bg_fetched"`
			EpClockCasDriftThresholdExceeded []float64 `json:"ep_clock_cas_drift_threshold_exceeded"`
			EpDataReadFailed []float64 `json:"ep_data_read_failed"`
			EpDataWriteFailed []float64 `json:"ep_data_write_failed"`
			// DCP (Database Change Protocol) stream statistics, grouped by
			// consumer type: 2i, cbas, eventing, fts, other, replica,
			// views, xdcr.
			EpDcp2IBackoff []float64 `json:"ep_dcp_2i_backoff"`
			EpDcp2ICount []float64 `json:"ep_dcp_2i_count"`
			EpDcp2IItemsRemaining []float64 `json:"ep_dcp_2i_items_remaining"`
			EpDcp2IItemsSent []float64 `json:"ep_dcp_2i_items_sent"`
			EpDcp2IProducerCount []float64 `json:"ep_dcp_2i_producer_count"`
			EpDcp2ITotalBacklogSize []float64 `json:"ep_dcp_2i_total_backlog_size"`
			EpDcp2ITotalBytes []float64 `json:"ep_dcp_2i_total_bytes"`
			EpDcpCbasBackoff []float64 `json:"ep_dcp_cbas_backoff"`
			EpDcpCbasCount []float64 `json:"ep_dcp_cbas_count"`
			EpDcpCbasItemsRemaining []float64 `json:"ep_dcp_cbas_items_remaining"`
			EpDcpCbasItemsSent []float64 `json:"ep_dcp_cbas_items_sent"`
			EpDcpCbasProducerCount []float64 `json:"ep_dcp_cbas_producer_count"`
			EpDcpCbasTotalBacklogSize []float64 `json:"ep_dcp_cbas_total_backlog_size"`
			EpDcpCbasTotalBytes []float64 `json:"ep_dcp_cbas_total_bytes"`
			EpDcpEventingBackoff []float64 `json:"ep_dcp_eventing_backoff"`
			EpDcpEventingCount []float64 `json:"ep_dcp_eventing_count"`
			EpDcpEventingItemsRemaining []float64 `json:"ep_dcp_eventing_items_remaining"`
			EpDcpEventingItemsSent []float64 `json:"ep_dcp_eventing_items_sent"`
			EpDcpEventingProducerCount []float64 `json:"ep_dcp_eventing_producer_count"`
			EpDcpEventingTotalBacklogSize []float64 `json:"ep_dcp_eventing_total_backlog_size"`
			EpDcpEventingTotalBytes []float64 `json:"ep_dcp_eventing_total_bytes"`
			EpDcpFtsBackoff []float64 `json:"ep_dcp_fts_backoff"`
			EpDcpFtsCount []float64 `json:"ep_dcp_fts_count"`
			EpDcpFtsItemsRemaining []float64 `json:"ep_dcp_fts_items_remaining"`
			EpDcpFtsItemsSent []float64 `json:"ep_dcp_fts_items_sent"`
			EpDcpFtsProducerCount []float64 `json:"ep_dcp_fts_producer_count"`
			EpDcpFtsTotalBacklogSize []float64 `json:"ep_dcp_fts_total_backlog_size"`
			EpDcpFtsTotalBytes []float64 `json:"ep_dcp_fts_total_bytes"`
			EpDcpOtherBackoff []float64 `json:"ep_dcp_other_backoff"`
			EpDcpOtherCount []float64 `json:"ep_dcp_other_count"`
			EpDcpOtherItemsRemaining []float64 `json:"ep_dcp_other_items_remaining"`
			EpDcpOtherItemsSent []float64 `json:"ep_dcp_other_items_sent"`
			EpDcpOtherProducerCount []float64 `json:"ep_dcp_other_producer_count"`
			EpDcpOtherTotalBacklogSize []float64 `json:"ep_dcp_other_total_backlog_size"`
			EpDcpOtherTotalBytes []float64 `json:"ep_dcp_other_total_bytes"`
			EpDcpReplicaBackoff []float64 `json:"ep_dcp_replica_backoff"`
			EpDcpReplicaCount []float64 `json:"ep_dcp_replica_count"`
			EpDcpReplicaItemsRemaining []float64 `json:"ep_dcp_replica_items_remaining"`
			EpDcpReplicaItemsSent []float64 `json:"ep_dcp_replica_items_sent"`
			EpDcpReplicaProducerCount []float64 `json:"ep_dcp_replica_producer_count"`
			EpDcpReplicaTotalBacklogSize []float64 `json:"ep_dcp_replica_total_backlog_size"`
			EpDcpReplicaTotalBytes []float64 `json:"ep_dcp_replica_total_bytes"`
			EpDcpViewsBackoff []float64 `json:"ep_dcp_views_backoff"`
			EpDcpViewsCount []float64 `json:"ep_dcp_views_count"`
			EpDcpViewsItemsRemaining []float64 `json:"ep_dcp_views_items_remaining"`
			EpDcpViewsItemsSent []float64 `json:"ep_dcp_views_items_sent"`
			EpDcpViewsProducerCount []float64 `json:"ep_dcp_views_producer_count"`
			EpDcpViewsTotalBacklogSize []float64 `json:"ep_dcp_views_total_backlog_size"`
			EpDcpViewsTotalBytes []float64 `json:"ep_dcp_views_total_bytes"`
			EpDcpXdcrBackoff []float64 `json:"ep_dcp_xdcr_backoff"`
			EpDcpXdcrCount []float64 `json:"ep_dcp_xdcr_count"`
			EpDcpXdcrItemsRemaining []float64 `json:"ep_dcp_xdcr_items_remaining"`
			EpDcpXdcrItemsSent []float64 `json:"ep_dcp_xdcr_items_sent"`
			EpDcpXdcrProducerCount []float64 `json:"ep_dcp_xdcr_producer_count"`
			EpDcpXdcrTotalBacklogSize []float64 `json:"ep_dcp_xdcr_total_backlog_size"`
			EpDcpXdcrTotalBytes []float64 `json:"ep_dcp_xdcr_total_bytes"`
			EpDiskqueueDrain []float64 `json:"ep_diskqueue_drain"`
			EpDiskqueueFill []float64 `json:"ep_diskqueue_fill"`
			EpDiskqueueItems []float64 `json:"ep_diskqueue_items"`
			EpFlusherTodo []float64 `json:"ep_flusher_todo"`
			EpItemCommitFailed []float64 `json:"ep_item_commit_failed"`
			EpKvSize []float64 `json:"ep_kv_size"`
			EpMaxSize []float64 `json:"ep_max_size"`
			EpMemHighWat []float64 `json:"ep_mem_high_wat"`
			EpMemLowWat []float64 `json:"ep_mem_low_wat"`
			EpMetaDataMemory []float64 `json:"ep_meta_data_memory"`
			EpNumNonResident []float64 `json:"ep_num_non_resident"`
			EpNumOpsDelMeta []float64 `json:"ep_num_ops_del_meta"`
			EpNumOpsDelRetMeta []float64 `json:"ep_num_ops_del_ret_meta"`
			EpNumOpsGetMeta []float64 `json:"ep_num_ops_get_meta"`
			EpNumOpsSetMeta []float64 `json:"ep_num_ops_set_meta"`
			EpNumOpsSetRetMeta []float64 `json:"ep_num_ops_set_ret_meta"`
			EpNumValueEjects []float64 `json:"ep_num_value_ejects"`
			EpOomErrors []float64 `json:"ep_oom_errors"`
			EpOpsCreate []float64 `json:"ep_ops_create"`
			EpOpsUpdate []float64 `json:"ep_ops_update"`
			EpOverhead []float64 `json:"ep_overhead"`
			EpQueueSize []float64 `json:"ep_queue_size"`
			EpReplicaAheadExceptions []float64 `json:"ep_replica_ahead_exceptions"`
			EpReplicaHlcDrift []float64 `json:"ep_replica_hlc_drift"`
			EpReplicaHlcDriftCount []float64 `json:"ep_replica_hlc_drift_count"`
			EpTmpOomErrors []float64 `json:"ep_tmp_oom_errors"`
			EpVbTotal []float64 `json:"ep_vb_total"`
			Evictions []float64 `json:"evictions"`
			GetHits []float64 `json:"get_hits"`
			GetMisses []float64 `json:"get_misses"`
			IncrHits []float64 `json:"incr_hits"`
			IncrMisses []float64 `json:"incr_misses"`
			MemUsed []float64 `json:"mem_used"`
			Misses []float64 `json:"misses"`
			Ops []float64 `json:"ops"`
			Timestamp []float64 `json:"timestamp"`
			// Per-vBucket statistics for active, pending, and replica
			// vBucket states.
			VbActiveEject []float64 `json:"vb_active_eject"`
			VbActiveItmMemory []float64 `json:"vb_active_itm_memory"`
			VbActiveMetaDataMemory []float64 `json:"vb_active_meta_data_memory"`
			VbActiveNum []float64 `json:"vb_active_num"`
			VbActiveNumNonResident []float64 `json:"vb_active_num_non_resident"`
			VbActiveOpsCreate []float64 `json:"vb_active_ops_create"`
			VbActiveOpsUpdate []float64 `json:"vb_active_ops_update"`
			VbActiveQueueAge []float64 `json:"vb_active_queue_age"`
			VbActiveQueueDrain []float64 `json:"vb_active_queue_drain"`
			VbActiveQueueFill []float64 `json:"vb_active_queue_fill"`
			VbActiveQueueSize []float64 `json:"vb_active_queue_size"`
			VbActiveSyncWriteAbortedCount []float64 `json:"vb_active_sync_write_aborted_count"`
			VbActiveSyncWriteAcceptedCount []float64 `json:"vb_active_sync_write_accepted_count"`
			VbActiveSyncWriteCommittedCount []float64 `json:"vb_active_sync_write_committed_count"`
			VbPendingCurrItems []float64 `json:"vb_pending_curr_items"`
			VbPendingEject []float64 `json:"vb_pending_eject"`
			VbPendingItmMemory []float64 `json:"vb_pending_itm_memory"`
			VbPendingMetaDataMemory []float64 `json:"vb_pending_meta_data_memory"`
			VbPendingNum []float64 `json:"vb_pending_num"`
			VbPendingNumNonResident []float64 `json:"vb_pending_num_non_resident"`
			VbPendingOpsCreate []float64 `json:"vb_pending_ops_create"`
			VbPendingOpsUpdate []float64 `json:"vb_pending_ops_update"`
			VbPendingQueueAge []float64 `json:"vb_pending_queue_age"`
			VbPendingQueueDrain []float64 `json:"vb_pending_queue_drain"`
			VbPendingQueueFill []float64 `json:"vb_pending_queue_fill"`
			VbPendingQueueSize []float64 `json:"vb_pending_queue_size"`
			VbReplicaCurrItems []float64 `json:"vb_replica_curr_items"`
			VbReplicaEject []float64 `json:"vb_replica_eject"`
			VbReplicaItmMemory []float64 `json:"vb_replica_itm_memory"`
			VbReplicaMetaDataMemory []float64 `json:"vb_replica_meta_data_memory"`
			VbReplicaNum []float64 `json:"vb_replica_num"`
			VbReplicaNumNonResident []float64 `json:"vb_replica_num_non_resident"`
			VbReplicaOpsCreate []float64 `json:"vb_replica_ops_create"`
			VbReplicaOpsUpdate []float64 `json:"vb_replica_ops_update"`
			VbReplicaQueueAge []float64 `json:"vb_replica_queue_age"`
			VbReplicaQueueDrain []float64 `json:"vb_replica_queue_drain"`
			VbReplicaQueueFill []float64 `json:"vb_replica_queue_fill"`
			VbReplicaQueueSize []float64 `json:"vb_replica_queue_size"`
			VbTotalQueueAge []float64 `json:"vb_total_queue_age"`
			XdcOps []float64 `json:"xdc_ops"`
			// Host/system-level statistics reported alongside the bucket
			// samples.
			Allocstall []float64 `json:"allocstall"`
			CPUCoresAvailable []float64 `json:"cpu_cores_available"`
			CPUIrqRate []float64 `json:"cpu_irq_rate"`
			CPUStolenRate []float64 `json:"cpu_stolen_rate"`
			CPUSysRate []float64 `json:"cpu_sys_rate"`
			CPUUserRate []float64 `json:"cpu_user_rate"`
			CPUUtilizationRate []float64 `json:"cpu_utilization_rate"`
			HibernatedRequests []float64 `json:"hibernated_requests"`
			HibernatedWaked []float64 `json:"hibernated_waked"`
			MemActualFree []float64 `json:"mem_actual_free"`
			MemActualUsed []float64 `json:"mem_actual_used"`
			MemFree []float64 `json:"mem_free"`
			MemLimit []float64 `json:"mem_limit"`
			MemTotal []float64 `json:"mem_total"`
			MemUsedSys []float64 `json:"mem_used_sys"`
			OdpReportFailed []float64 `json:"odp_report_failed"`
			RestRequests []float64 `json:"rest_requests"`
			SwapTotal []float64 `json:"swap_total"`
			SwapUsed []float64 `json:"swap_used"`
		} `json:"samples"`
		// Samplescount is the number of samples in each series above.
		Samplescount int `json:"samplesCount"`
		Ispersistent bool `json:"isPersistent"`
		// Lasttstamp is the timestamp of the newest sample, in
		// milliseconds since the Unix epoch (per the fixture data).
		Lasttstamp int64 `json:"lastTStamp"`
		Interval int `json:"interval"`
	} `json:"op"`
	HotKeys []interface{} `json:"hot_keys"`
}

View file

@ -0,0 +1,274 @@
package couchbase
import (
"encoding/json"
"net/http"
"net/http/httptest"
"os"
"testing"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/testutil"
)
// TestGatherServer verifies that per-node and cluster-level bucket metrics
// are produced from canned API responses served by a local test server.
func TestGatherServer(t *testing.T) {
	bucket := "blastro-df"
	fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Map each API path to its canned fixture; switch matches the
		// dispatch style used by TestGatherFailover.
		var fixture string
		switch r.URL.Path {
		case "/pools":
			fixture = "testdata/pools_response.json"
		case "/pools/default":
			fixture = "testdata/pools_default_response.json"
		case "/pools/default/buckets":
			fixture = "testdata/bucket_response.json"
		case "/pools/default/buckets/" + bucket + "/stats":
			fixture = "testdata/bucket_stats_response.json"
		default:
			w.WriteHeader(http.StatusNotFound)
			return
		}
		if _, err := w.Write(readJSON(t, fixture)); err != nil {
			w.WriteHeader(http.StatusInternalServerError)
			t.Error(err)
		}
	}))
	// Close the listener when the test ends so it is not leaked.
	defer fakeServer.Close()

	cb := Couchbase{
		ClusterBucketStats:  true,
		BucketStatsIncluded: []string{"quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"},
	}
	require.NoError(t, cb.Init())

	var acc testutil.Accumulator
	require.NoError(t, cb.gatherServer(&acc, fakeServer.URL))
	acc.AssertContainsTaggedFields(t, "couchbase_node",
		map[string]interface{}{"memory_free": 23181365248.0, "memory_total": 64424656896.0},
		map[string]string{"cluster": fakeServer.URL, "hostname": "172.16.10.187:8091"})
	acc.AssertContainsTaggedFields(t, "couchbase_node",
		map[string]interface{}{"memory_free": 23665811456.0, "memory_total": 64424656896.0},
		map[string]string{"cluster": fakeServer.URL, "hostname": "172.16.10.65:8091"})
	acc.AssertContainsTaggedFields(t, "couchbase_bucket",
		map[string]interface{}{
			"quota_percent_used": 68.85424936294555,
			"ops_per_sec":        5686.789686789687,
			"disk_fetches":       0.0,
			"item_count":         943239752.0,
			"disk_used":          409178772321.0,
			"data_used":          212179309111.0,
			"mem_used":           202156957464.0,
		},
		map[string]string{"cluster": fakeServer.URL, "bucket": "blastro-df"})
}
// TestSanitizeURI checks that credentials embedded in server URLs are
// stripped before the URL is used (e.g. as a tag value).
func TestSanitizeURI(t *testing.T) {
	cases := []struct {
		in   string
		want string
	}{
		{"http://user:password@localhost:121", "http://localhost:121"},
		{"user:password@localhost:12/endpoint", "localhost:12/endpoint"},
		{"https://mail@address.com:password@localhost", "https://localhost"},
		{"localhost", "localhost"},
		{"user:password@localhost:2321", "localhost:2321"},
		{"http://user:password@couchbase-0.example.com:8091/endpoint", "http://couchbase-0.example.com:8091/endpoint"},
		{" ", " "},
	}

	for _, tc := range cases {
		if got := regexpURI.ReplaceAllString(tc.in, "${1}"); got != tc.want {
			t.Errorf("TestSanitizeAddress: input %s, expected %s, actual %s", tc.in, tc.want, got)
		}
	}
}
// TestGatherDetailedBucketMetrics verifies detailed bucket statistics are
// gathered at both cluster and node level, including responses that omit
// some sample fields.
func TestGatherDetailedBucketMetrics(t *testing.T) {
	bucket := "Ducks"
	node := "172.94.77.2:8091"

	bucketStatsResponse := readJSON(t, "testdata/bucket_stats_response.json")
	bucketStatsResponseWithMissing := readJSON(t, "testdata/bucket_stats_response_with_missing.json")
	nodeBucketStatsResponse := readJSON(t, "testdata/node_bucket_stats_response.json")

	tests := []struct {
		name     string
		node     string
		response []byte
	}{
		{
			name:     "cluster-level with all fields",
			response: bucketStatsResponse,
		},
		{
			name:     "cluster-level with missing fields",
			response: bucketStatsResponseWithMissing,
		},
		{
			name:     "node-level with all fields",
			response: nodeBucketStatsResponse,
			node:     node,
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				if r.URL.Path == "/pools/default/buckets/"+bucket+"/stats" || r.URL.Path == "/pools/default/buckets/"+bucket+"/nodes/"+node+"/stats" {
					if _, err := w.Write(test.response); err != nil {
						w.WriteHeader(http.StatusInternalServerError)
						t.Error(err)
						return
					}
				} else {
					w.WriteHeader(http.StatusNotFound)
				}
			}))
			// Close each subtest's server to avoid leaking listeners.
			defer fakeServer.Close()

			var cb Couchbase
			cb.BucketStatsIncluded = []string{"couch_total_disk_size"}
			cb.ClientConfig = tls.ClientConfig{
				InsecureSkipVerify: true,
			}
			require.NoError(t, cb.Init())

			// Sanity-check that the fixture parses into the stats struct;
			// named `stats` so it does not shadow the bucketStats type.
			stats := &bucketStats{}
			if err := json.Unmarshal(test.response, stats); err != nil {
				t.Fatal("parse bucketResponse", err)
			}

			fields := make(map[string]interface{})
			err := cb.gatherDetailedBucketStats(fakeServer.URL, bucket, test.node, fields)
			require.NoError(t, err)

			var acc testutil.Accumulator
			acc.AddFields("couchbase_bucket", fields, nil)

			// Ensure we gathered only one metric (the one that we configured).
			require.Len(t, acc.Metrics, 1)
			require.Len(t, acc.Metrics[0].Fields, 1)
		})
	}
}
// TestGatherNodeOnly verifies that with bucket statistics disabled only
// node-level metrics are gathered.
func TestGatherNodeOnly(t *testing.T) {
	faker := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Map each API path to its canned fixture; switch matches the
		// dispatch style used by TestGatherFailover.
		var fixture string
		switch r.URL.Path {
		case "/pools":
			fixture = "testdata/pools_response.json"
		case "/pools/default":
			fixture = "testdata/pools_default_response.json"
		case "/pools/default/buckets":
			fixture = "testdata/bucket_response.json"
		default:
			w.WriteHeader(http.StatusNotFound)
			return
		}
		if _, err := w.Write(readJSON(t, fixture)); err != nil {
			w.WriteHeader(http.StatusInternalServerError)
			t.Error(err)
		}
	}))
	// Close the listener when the test ends so it is not leaked.
	defer faker.Close()

	cb := Couchbase{
		Servers: []string{faker.URL},
	}
	require.NoError(t, cb.Init())

	var acc testutil.Accumulator
	require.NoError(t, cb.gatherServer(&acc, faker.URL))
	require.Empty(t, acc.Errors)
	require.Len(t, acc.Metrics, 7)
	acc.AssertDoesNotContainMeasurement(t, "couchbase_bucket")
}
// TestGatherFailover verifies that the optional "autofailover" stats are
// gathered and typed correctly when requested via AdditionalStats.
func TestGatherFailover(t *testing.T) {
	faker := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Map each API path to its canned fixture.
		var fixture string
		switch r.URL.Path {
		case "/pools":
			fixture = "testdata/pools_response.json"
		case "/pools/default":
			fixture = "testdata/pools_default_response.json"
		case "/pools/default/buckets":
			fixture = "testdata/bucket_response.json"
		case "/settings/autoFailover":
			fixture = "testdata/settings_autofailover.json"
		default:
			w.WriteHeader(http.StatusNotFound)
			return
		}
		if _, err := w.Write(readJSON(t, fixture)); err != nil {
			w.WriteHeader(http.StatusInternalServerError)
			t.Error(err)
		}
	}))
	// Close the listener when the test ends so it is not leaked.
	defer faker.Close()

	cb := Couchbase{
		Servers:            []string{faker.URL},
		ClusterBucketStats: false,
		NodeBucketStats:    false,
		AdditionalStats:    []string{"autofailover"},
	}
	require.NoError(t, cb.Init())

	var acc testutil.Accumulator
	require.NoError(t, cb.gatherServer(&acc, faker.URL))
	require.Empty(t, acc.Errors)
	require.Len(t, acc.Metrics, 8)

	// Locate the autofailover measurement among the gathered metrics.
	var metric *testutil.Metric
	for _, m := range acc.Metrics {
		if m.Measurement == "couchbase_autofailover" {
			metric = m
			break
		}
	}
	require.NotNil(t, metric)
	require.Equal(t, 1, metric.Fields["count"])
	v, ok := metric.Fields["enabled"].(bool)
	require.Truef(t, ok, "bool type expected, got '%T' with '%v' value instead", metric.Fields["enabled"], metric.Fields["enabled"])
	require.True(t, v)
	require.Equal(t, 2, metric.Fields["max_count"])
	require.Equal(t, 72, metric.Fields["timeout"])
}
// readJSON loads a test fixture from disk, failing the test on error.
func readJSON(t *testing.T, jsonFilePath string) []byte {
	// Mark as a helper so failures are attributed to the caller's line.
	t.Helper()
	data, err := os.ReadFile(jsonFilePath)
	require.NoErrorf(t, err, "could not read from data file %s", jsonFilePath)
	return data
}

View file

@ -0,0 +1,35 @@
# Read per-node and per-bucket metrics from Couchbase
[[inputs.couchbase]]
## specify servers via a url matching:
## [protocol://][:password]@address[:port]
## e.g.
## http://couchbase-0.example.com/
## http://admin:secret@couchbase-0.example.com:8091/
##
## If no servers are specified, then localhost is used as the host.
## If no protocol is specified, HTTP is used.
## If no port is specified, 8091 is used.
servers = ["http://localhost:8091"]
## Filter bucket fields to include only here.
# bucket_stats_included = ["quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"]
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification (defaults to false)
## If set to false, tls_cert and tls_key are required
# insecure_skip_verify = false
## Whether to collect cluster-wide bucket statistics
## It is recommended to disable this in favor of node_stats
## to get a better view of the cluster.
# cluster_bucket_stats = true
## Whether to collect bucket stats for each individual node
# node_bucket_stats = false
## List of additional stats to collect, choose from:
## * autofailover
# additional_stats = []

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,78 @@
{
"op": {
"samples": {
"couch_total_disk_size": [
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341,
559341
]
},
"samplesCount": 60,
"isPersistent": true,
"lastTStamp": 1615918178442,
"interval": 1000
},
"hot_keys": [
{
"name": "first-duck",
"ops": 6.003482019571351e-05
}
]
}

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,404 @@
{
"storageTotals": {
"ram": {
"total": 450972598272,
"quotaTotal": 360777252864,
"quotaUsed": 360777252864,
"used": 446826622976,
"usedByData": 255061495696,
"quotaUsedPerNode": 51539607552,
"quotaTotalPerNode": 51539607552
},
"hdd": {
"total": 1108766539776,
"quotaTotal": 1108766539776,
"used": 559135126484,
"usedByData": 515767865143,
"free": 498944942902
}
},
"serverGroupsUri": "/pools/default/serverGroups",
"name": "default",
"alerts": [
"Metadata overhead warning. Over 63% of RAM allocated to bucket \"blastro-df\" on node \"172.16.8.148\" is taken up by keys and metadata.",
"Metadata overhead warning. Over 65% of RAM allocated to bucket \"blastro-df\" on node \"172.16.10.65\" is taken up by keys and metadata.",
"Metadata overhead warning. Over 64% of RAM allocated to bucket \"blastro-df\" on node \"172.16.13.173\" is taken up by keys and metadata.",
"Metadata overhead warning. Over 65% of RAM allocated to bucket \"blastro-df\" on node \"172.16.15.75\" is taken up by keys and metadata.",
"Metadata overhead warning. Over 65% of RAM allocated to bucket \"blastro-df\" on node \"172.16.13.105\" is taken up by keys and metadata.",
"Metadata overhead warning. Over 64% of RAM allocated to bucket \"blastro-df\" on node \"172.16.8.127\" is taken up by keys and metadata.",
"Metadata overhead warning. Over 63% of RAM allocated to bucket \"blastro-df\" on node \"172.16.15.120\" is taken up by keys and metadata.",
"Metadata overhead warning. Over 66% of RAM allocated to bucket \"blastro-df\" on node \"172.16.10.187\" is taken up by keys and metadata."
],
"alertsSilenceURL": "/controller/resetAlerts",
"nodes": [
{
"systemStats": {
"cpu_utilization_rate": 35.43307086614173,
"swap_total": 0,
"swap_used": 0,
"mem_total": 64424656896,
"mem_free": 23181365248
},
"interestingStats": {
"cmd_get": 17.98201798201798,
"couch_docs_actual_disk_size": 68506048063,
"couch_docs_data_size": 38718796110,
"couch_views_actual_disk_size": 0,
"couch_views_data_size": 0,
"curr_items": 140158886,
"curr_items_tot": 279374646,
"ep_bg_fetched": 0.999000999000999,
"get_hits": 10.98901098901099,
"mem_used": 36497390640,
"ops": 829.1708291708292,
"vb_replica_curr_items": 139215760
},
"uptime": "341236",
"memoryTotal": 64424656896,
"memoryFree": 23181365248,
"mcdMemoryReserved": 49152,
"mcdMemoryAllocated": 49152,
"couchApiBase": "http://172.16.10.187:8092/",
"clusterMembership": "active",
"recoveryType": "none",
"status": "healthy",
"otpNode": "ns_1@172.16.10.187",
"thisNode": true,
"hostname": "172.16.10.187:8091",
"clusterCompatibility": 196608,
"version": "3.0.1-1444-rel-community",
"os": "x86_64-unknown-linux-gnu",
"ports": {
"proxy": 11211,
"direct": 11210
}
},
{
"systemStats": {
"cpu_utilization_rate": 47.38255033557047,
"swap_total": 0,
"swap_used": 0,
"mem_total": 64424656896,
"mem_free": 23665811456
},
"interestingStats": {
"cmd_get": 172.8271728271728,
"couch_docs_actual_disk_size": 79360565405,
"couch_docs_data_size": 38736382876,
"couch_views_actual_disk_size": 0,
"couch_views_data_size": 0,
"curr_items": 140174377,
"curr_items_tot": 279383025,
"ep_bg_fetched": 0.999000999000999,
"get_hits": 167.8321678321678,
"mem_used": 36650059656,
"ops": 1685.314685314685,
"vb_replica_curr_items": 139208648
},
"uptime": "341210",
"memoryTotal": 64424656896,
"memoryFree": 23665811456,
"mcdMemoryReserved": 49152,
"mcdMemoryAllocated": 49152,
"couchApiBase": "http://172.16.10.65:8092/",
"clusterMembership": "active",
"recoveryType": "none",
"status": "healthy",
"otpNode": "ns_1@172.16.10.65",
"hostname": "172.16.10.65:8091",
"clusterCompatibility": 196608,
"version": "3.0.1-1444-rel-community",
"os": "x86_64-unknown-linux-gnu",
"ports": {
"proxy": 11211,
"direct": 11210
}
},
{
"systemStats": {
"cpu_utilization_rate": 25.5586592178771,
"swap_total": 0,
"swap_used": 0,
"mem_total": 64424656896,
"mem_free": 23726600192
},
"interestingStats": {
"cmd_get": 63.06306306306306,
"couch_docs_actual_disk_size": 79345105217,
"couch_docs_data_size": 38728086130,
"couch_views_actual_disk_size": 0,
"couch_views_data_size": 0,
"curr_items": 139195268,
"curr_items_tot": 279349113,
"ep_bg_fetched": 0,
"get_hits": 53.05305305305306,
"mem_used": 36476665576,
"ops": 1878.878878878879,
"vb_replica_curr_items": 140153845
},
"uptime": "341210",
"memoryTotal": 64424656896,
"memoryFree": 23726600192,
"mcdMemoryReserved": 49152,
"mcdMemoryAllocated": 49152,
"couchApiBase": "http://172.16.13.105:8092/",
"clusterMembership": "active",
"recoveryType": "none",
"status": "healthy",
"otpNode": "ns_1@172.16.13.105",
"hostname": "172.16.13.105:8091",
"clusterCompatibility": 196608,
"version": "3.0.1-1444-rel-community",
"os": "x86_64-unknown-linux-gnu",
"ports": {
"proxy": 11211,
"direct": 11210
}
},
{
"systemStats": {
"cpu_utilization_rate": 26.45803698435277,
"swap_total": 0,
"swap_used": 0,
"mem_total": 64424656896,
"mem_free": 23854841856
},
"interestingStats": {
"cmd_get": 51.05105105105105,
"couch_docs_actual_disk_size": 74465931949,
"couch_docs_data_size": 38723830730,
"couch_views_actual_disk_size": 0,
"couch_views_data_size": 0,
"curr_items": 139209869,
"curr_items_tot": 279380019,
"ep_bg_fetched": 0,
"get_hits": 47.04704704704704,
"mem_used": 36471784896,
"ops": 1831.831831831832,
"vb_replica_curr_items": 140170150
},
"uptime": "340526",
"memoryTotal": 64424656896,
"memoryFree": 23854841856,
"mcdMemoryReserved": 49152,
"mcdMemoryAllocated": 49152,
"couchApiBase": "http://172.16.13.173:8092/",
"clusterMembership": "active",
"recoveryType": "none",
"status": "healthy",
"otpNode": "ns_1@172.16.13.173",
"hostname": "172.16.13.173:8091",
"clusterCompatibility": 196608,
"version": "3.0.1-1444-rel-community",
"os": "x86_64-unknown-linux-gnu",
"ports": {
"proxy": 11211,
"direct": 11210
}
},
{
"systemStats": {
"cpu_utilization_rate": 47.31034482758621,
"swap_total": 0,
"swap_used": 0,
"mem_total": 64424656896,
"mem_free": 23773573120
},
"interestingStats": {
"cmd_get": 77.07707707707708,
"couch_docs_actual_disk_size": 74743093945,
"couch_docs_data_size": 38594660087,
"couch_views_actual_disk_size": 0,
"couch_views_data_size": 0,
"curr_items": 139215932,
"curr_items_tot": 278427644,
"ep_bg_fetched": 0,
"get_hits": 53.05305305305305,
"mem_used": 36306500344,
"ops": 1981.981981981982,
"vb_replica_curr_items": 139211712
},
"uptime": "340495",
"memoryTotal": 64424656896,
"memoryFree": 23773573120,
"mcdMemoryReserved": 49152,
"mcdMemoryAllocated": 49152,
"couchApiBase": "http://172.16.15.120:8092/",
"clusterMembership": "active",
"recoveryType": "none",
"status": "healthy",
"otpNode": "ns_1@172.16.15.120",
"hostname": "172.16.15.120:8091",
"clusterCompatibility": 196608,
"version": "3.0.1-1444-rel-community",
"os": "x86_64-unknown-linux-gnu",
"ports": {
"proxy": 11211,
"direct": 11210
}
},
{
"systemStats": {
"cpu_utilization_rate": 17.60660247592847,
"swap_total": 0,
"swap_used": 0,
"mem_total": 64424656896,
"mem_free": 23662190592
},
"interestingStats": {
"cmd_get": 146.8531468531468,
"couch_docs_actual_disk_size": 72932847344,
"couch_docs_data_size": 38581771457,
"couch_views_actual_disk_size": 0,
"couch_views_data_size": 0,
"curr_items": 139226879,
"curr_items_tot": 278436540,
"ep_bg_fetched": 0,
"get_hits": 144.8551448551448,
"mem_used": 36421860496,
"ops": 1495.504495504495,
"vb_replica_curr_items": 139209661
},
"uptime": "337174",
"memoryTotal": 64424656896,
"memoryFree": 23662190592,
"mcdMemoryReserved": 49152,
"mcdMemoryAllocated": 49152,
"couchApiBase": "http://172.16.8.127:8092/",
"clusterMembership": "active",
"recoveryType": "none",
"status": "healthy",
"otpNode": "ns_1@172.16.8.127",
"hostname": "172.16.8.127:8091",
"clusterCompatibility": 196608,
"version": "3.0.1-1444-rel-community",
"os": "x86_64-unknown-linux-gnu",
"ports": {
"proxy": 11211,
"direct": 11210
}
},
{
"systemStats": {
"cpu_utilization_rate": 21.68831168831169,
"swap_total": 0,
"swap_used": 0,
"mem_total": 64424656896,
"mem_free": 24049729536
},
"interestingStats": {
"cmd_get": 11.98801198801199,
"couch_docs_actual_disk_size": 66414273220,
"couch_docs_data_size": 38587642702,
"couch_views_actual_disk_size": 0,
"couch_views_data_size": 0,
"curr_items": 139193759,
"curr_items_tot": 278398926,
"ep_bg_fetched": 0,
"get_hits": 9.990009990009991,
"mem_used": 36237234088,
"ops": 883.1168831168832,
"vb_replica_curr_items": 139205167
},
"uptime": "341228",
"memoryTotal": 64424656896,
"memoryFree": 24049729536,
"mcdMemoryReserved": 49152,
"mcdMemoryAllocated": 49152,
"couchApiBase": "http://172.16.8.148:8092/",
"clusterMembership": "active",
"recoveryType": "none",
"status": "healthy",
"otpNode": "ns_1@172.16.8.148",
"hostname": "172.16.8.148:8091",
"clusterCompatibility": 196608,
"version": "3.0.1-1444-rel-community",
"os": "x86_64-unknown-linux-gnu",
"ports": {
"proxy": 11211,
"direct": 11210
}
}
],
"buckets": {
"uri": "/pools/default/buckets",
"terseBucketsBase": "/pools/default/b/",
"terseStreamingBucketsBase": "/pools/default/bs/"
},
"remoteClusters": {
"uri": "/pools/default/remoteClusters",
"validateURI": "/pools/default/remoteClusters?just_validate=1"
},
"controllers": {
"addNode": {
"uri": "/controller/addNode"
},
"rebalance": {
"uri": "/controller/rebalance"
},
"failOver": {
"uri": "/controller/failOver"
},
"startGracefulFailover": {
"uri": "/controller/startGracefulFailover"
},
"reAddNode": {
"uri": "/controller/reAddNode"
},
"reFailOver": {
"uri": "/controller/reFailOver"
},
"ejectNode": {
"uri": "/controller/ejectNode"
},
"setRecoveryType": {
"uri": "/controller/setRecoveryType"
},
"setAutoCompaction": {
"uri": "/controller/setAutoCompaction",
"validateURI": "/controller/setAutoCompaction?just_validate=1"
},
"clusterLogsCollection": {
"startURI": "/controller/startLogsCollection",
"cancelURI": "/controller/cancelLogsCollection"
},
"replication": {
"createURI": "/controller/createReplication",
"validateURI": "/controller/createReplication?just_validate=1"
},
"setFastWarmup": {
"uri": "/controller/setFastWarmup",
"validateURI": "/controller/setFastWarmup?just_validate=1"
}
},
"rebalanceStatus": "none",
"rebalanceProgressUri": "/pools/default/rebalanceProgress",
"stopRebalanceUri": "/controller/stopRebalance",
"nodeStatusesUri": "/nodeStatuses",
"maxBucketCount": 10,
"autoCompactionSettings": {
"parallelDBAndViewCompaction": false,
"databaseFragmentationThreshold": {
"percentage": 50,
"size": "undefined"
},
"viewFragmentationThreshold": {
"percentage": 50,
"size": "undefined"
}
},
"fastWarmupSettings": {
"fastWarmupEnabled": true,
"minMemoryThreshold": 10,
"minItemsThreshold": 10
},
"tasks": {
"uri": "/pools/default/tasks"
},
"visualSettingsUri": "/internalSettings/visual",
"counters": {
"rebalance_success": 4,
"rebalance_start": 6,
"rebalance_stop": 2
}
}

View file

@ -0,0 +1,8 @@
{
"pools": [
{
"name": "default",
"uri": "/pools/default"
}
]
}

View file

@ -0,0 +1,11 @@
{
"enabled": true,
"timeout": 72,
"count": 1,
"failoverOnDataDiskIssues": {
"enabled": true,
"timePeriod": 89
},
"maxCount": 2,
"canAbortRebalance": true
}