
Adding upstream version 1.34.4.

Signed-off-by: Daniel Baumann <daniel@debian.org>
commit 4978089aab (parent e393c3af3f)
Author: Daniel Baumann <daniel@debian.org>
Date: 2025-05-24 07:26:29 +02:00
Signed with GPG key ID FBB4F0E80A80222F
4963 changed files with 677545 additions and 0 deletions

plugins/inputs/kibana/README.md
@@ -0,0 +1,100 @@
# Kibana Input Plugin
This plugin collects metrics about service status from [Kibana][kibana]
instances via the server's API.
> [!NOTE]
> This plugin requires Kibana version 6.0+.
⭐ Telegraf v1.8.0
🏷️ applications, server
💻 all
[kibana]: https://www.elastic.co/kibana
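
For reference, the plugin simply issues an HTTP GET against each server's
`/api/status` endpoint and decodes the JSON response. A minimal standalone
sketch of that request (standard library only; the URL is just the default
server address from the configuration below):

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Default server address from the sample configuration below.
	server := "http://localhost:5601"

	// The plugin appends /api/status to each configured server URL.
	resp, err := http.Get(server + "/api/status")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Decode just the overall health state; the plugin reads many more
	// fields (heap usage, response times, request counts, ...).
	var status struct {
		Status struct {
			Overall struct {
				State string `json:"state"`
			} `json:"overall"`
		} `json:"status"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&status); err != nil {
		panic(err)
	}
	fmt.Println("overall state:", status.Status.Overall.State)
}
```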
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used to
modify metrics, tags, and fields, or create aliases and configure ordering, etc.
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
## Configuration
```toml @sample.conf
# Read status information from one or more Kibana servers
[[inputs.kibana]]
## Specify a list of one or more Kibana servers
servers = ["http://localhost:5601"]
## Timeout for HTTP requests
timeout = "5s"
## HTTP Basic Auth credentials
# username = "username"
# password = "pa$$word"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## If 'use_system_proxy' is set to true, Telegraf will check env vars such as
## HTTP_PROXY, HTTPS_PROXY, and NO_PROXY (or their lowercase counterparts).
## If 'use_system_proxy' is set to false (default) and 'http_proxy_url' is
## provided, Telegraf will use the specified URL as HTTP proxy.
# use_system_proxy = false
# http_proxy_url = "http://localhost:8888"
```
## Metrics
- kibana
- tags:
- name (Kibana reported name)
- source (Kibana server hostname or IP)
- status (Kibana health: green, yellow, red)
- version (Kibana version)
- fields:
- status_code (integer, green=1, yellow=2, red=3, unknown=0)
- heap_total_bytes (integer)
- heap_max_bytes (integer; deprecated in 1.13.3: use `heap_total_bytes` field)
- heap_used_bytes (integer)
- heap_size_limit (integer)
- uptime_ms (integer)
- response_time_avg_ms (float)
- response_time_max_ms (integer)
- concurrent_connections (integer)
- requests_per_sec (float)
## Example Output
```text
kibana,host=myhost,name=my-kibana,source=localhost:5601,status=green,version=6.5.4 concurrent_connections=8i,heap_max_bytes=447778816i,heap_total_bytes=447778816i,heap_used_bytes=380603352i,requests_per_sec=1,response_time_avg_ms=57.6,response_time_max_ms=220i,status_code=1i,uptime_ms=6717489805i 1534864502000000000
```
## Run example environment
Requires the following tools:
- [Docker](https://docs.docker.com/get-docker/)
- [Docker Compose](https://docs.docker.com/compose/install/)
From the root of this project execute the following script:
`./plugins/inputs/kibana/test_environment/run_test_env.sh`
This will build the latest Telegraf and then start up Kibana and Elasticsearch.
Telegraf will begin monitoring Kibana's status and write the results to the
file `/tmp/metrics.out` in the Telegraf container.
You can then attach to the Telegraf container and inspect `/tmp/metrics.out`
to verify that the status is being reported.
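For example, using the Compose service name `telegraf` from the test
environment's `docker-compose.yml`:
`docker-compose -f plugins/inputs/kibana/test_environment/docker-compose.yml exec telegraf cat /tmp/metrics.out`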
The Visual Studio Code [Remote - Containers][remote] extension provides an easy
user interface to attach to the running container.
[remote]: https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers

plugins/inputs/kibana/kibana.go
@@ -0,0 +1,254 @@
//go:generate ../../../tools/readme_config_includer/generator
package kibana
import (
"context"
_ "embed"
"encoding/json"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
common_http "github.com/influxdata/telegraf/plugins/common/http"
"github.com/influxdata/telegraf/plugins/inputs"
)
//go:embed sample.conf
var sampleConfig string
const statusPath = "/api/status"
type Kibana struct {
Servers []string `toml:"servers"`
Username string `toml:"username"`
Password string `toml:"password"`
Log telegraf.Logger `toml:"-"`
client *http.Client
common_http.HTTPClientConfig
}
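// kibanaStatus mirrors the subset of the /api/status response consumed by the plugin.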
type kibanaStatus struct {
Name string `json:"name"`
Version version `json:"version"`
Status status `json:"status"`
Metrics metrics `json:"metrics"`
}
type version struct {
Number string `json:"number"`
BuildHash string `json:"build_hash"`
BuildNumber int `json:"build_number"`
BuildSnapshot bool `json:"build_snapshot"`
}
type status struct {
Overall overallStatus `json:"overall"`
Statuses interface{} `json:"statuses"`
}
type overallStatus struct {
State string `json:"state"`
}
type metrics struct {
UptimeInMillis float64 `json:"uptime_in_millis"`
ConcurrentConnections int64 `json:"concurrent_connections"`
CollectionIntervalInMillis int64 `json:"collection_interval_in_millis"`
ResponseTimes responseTimes `json:"response_times"`
Process process `json:"process"`
Requests requests `json:"requests"`
}
type responseTimes struct {
AvgInMillis float64 `json:"avg_in_millis"`
MaxInMillis int64 `json:"max_in_millis"`
}
type process struct {
Mem mem `json:"mem"`
Memory memory `json:"memory"`
UptimeInMillis float64 `json:"uptime_in_millis"`
}
type requests struct {
Total int64 `json:"total"`
}
type mem struct {
HeapMaxInBytes int64 `json:"heap_max_in_bytes"`
HeapUsedInBytes int64 `json:"heap_used_in_bytes"`
}
type memory struct {
Heap heap `json:"heap"`
}
type heap struct {
TotalInBytes int64 `json:"total_in_bytes"`
UsedInBytes int64 `json:"used_in_bytes"`
SizeLimit int64 `json:"size_limit"`
}
func (*Kibana) SampleConfig() string {
return sampleConfig
}
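// Start is a no-op; together with Stop below it satisfies telegraf's
// service-input interface so idle HTTP connections can be closed on shutdown.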
func (*Kibana) Start(telegraf.Accumulator) error {
return nil
}
func (k *Kibana) Gather(acc telegraf.Accumulator) error {
if k.client == nil {
client, err := k.createHTTPClient()
if err != nil {
return err
}
k.client = client
}
var wg sync.WaitGroup
wg.Add(len(k.Servers))
// Query every configured server concurrently; errors are reported through
// the accumulator so one failing server does not abort the others.
for _, serv := range k.Servers {
go func(baseURL string, acc telegraf.Accumulator) {
defer wg.Done()
if err := k.gatherKibanaStatus(baseURL, acc); err != nil {
acc.AddError(fmt.Errorf("[url=%s]: %w", baseURL, err))
return
}
}(serv, acc)
}
wg.Wait()
return nil
}
func (k *Kibana) Stop() {
if k.client != nil {
k.client.CloseIdleConnections()
}
}
func (k *Kibana) createHTTPClient() (*http.Client, error) {
ctx := context.Background()
return k.HTTPClientConfig.CreateClient(ctx, k.Log)
}
func (k *Kibana) gatherKibanaStatus(baseURL string, acc telegraf.Accumulator) error {
kibanaStatus := &kibanaStatus{}
url := baseURL + statusPath
host, err := k.gatherJSONData(url, kibanaStatus)
if err != nil {
return err
}
fields := make(map[string]interface{})
tags := make(map[string]string)
tags["name"] = kibanaStatus.Name
tags["source"] = host
tags["version"] = kibanaStatus.Version.Number
tags["status"] = kibanaStatus.Status.Overall.State
fields["status_code"] = mapHealthStatusToCode(kibanaStatus.Status.Overall.State)
fields["concurrent_connections"] = kibanaStatus.Metrics.ConcurrentConnections
fields["response_time_avg_ms"] = kibanaStatus.Metrics.ResponseTimes.AvgInMillis
fields["response_time_max_ms"] = kibanaStatus.Metrics.ResponseTimes.MaxInMillis
fields["requests_per_sec"] = float64(kibanaStatus.Metrics.Requests.Total) / float64(kibanaStatus.Metrics.CollectionIntervalInMilles) * 1000
// Parse the major.minor part of the reported version (e.g. "6.5.4" becomes
// 6.5): Kibana 6.4+ reports heap statistics under process.memory.heap,
// older versions under process.mem.
versionArray := strings.Split(kibanaStatus.Version.Number, ".")
arrayElement := 1
if len(versionArray) > 1 {
arrayElement = 2
}
versionNumber, err := strconv.ParseFloat(strings.Join(versionArray[:arrayElement], "."), 64)
if err != nil {
return err
}
// The same value is assigned to both heap_max_bytes and heap_total_bytes,
// which keeps the output backward compatible.
if versionNumber >= 6.4 {
fields["uptime_ms"] = int64(kibanaStatus.Metrics.Process.UptimeInMillis)
fields["heap_max_bytes"] = kibanaStatus.Metrics.Process.Memory.Heap.TotalInBytes
fields["heap_total_bytes"] = kibanaStatus.Metrics.Process.Memory.Heap.TotalInBytes
fields["heap_used_bytes"] = kibanaStatus.Metrics.Process.Memory.Heap.UsedInBytes
fields["heap_size_limit"] = kibanaStatus.Metrics.Process.Memory.Heap.SizeLimit
} else {
fields["uptime_ms"] = int64(kibanaStatus.Metrics.UptimeInMillis)
fields["heap_max_bytes"] = kibanaStatus.Metrics.Process.Mem.HeapMaxInBytes
fields["heap_total_bytes"] = kibanaStatus.Metrics.Process.Mem.HeapMaxInBytes
fields["heap_used_bytes"] = kibanaStatus.Metrics.Process.Mem.HeapUsedInBytes
}
acc.AddFields("kibana", fields, tags)
return nil
}
func (k *Kibana) gatherJSONData(url string, v interface{}) (host string, err error) {
request, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
return "", fmt.Errorf("unable to create new request %q: %w", url, err)
}
if (k.Username != "") || (k.Password != "") {
request.SetBasicAuth(k.Username, k.Password)
}
response, err := k.client.Do(request)
if err != nil {
return "", err
}
defer response.Body.Close()
if response.StatusCode != http.StatusOK {
//nolint:errcheck // LimitReader returns io.EOF and we're not interested in read errors.
body, _ := io.ReadAll(io.LimitReader(response.Body, 200))
return request.Host, fmt.Errorf("%s returned HTTP status %s: %q", url, response.Status, body)
}
if err := json.NewDecoder(response.Body).Decode(v); err != nil {
return request.Host, err
}
return request.Host, nil
}
// mapHealthStatusToCode maps the Kibana health state to the numeric
// status_code field: green=1, yellow=2, red=3, anything else 0.
func mapHealthStatusToCode(s string) int {
switch strings.ToLower(s) {
case "green":
return 1
case "yellow":
return 2
case "red":
return 3
}
return 0
}
func newKibana() *Kibana {
return &Kibana{
HTTPClientConfig: common_http.HTTPClientConfig{
Timeout: config.Duration(5 * time.Second),
},
}
}
func init() {
inputs.Add("kibana", func() telegraf.Input {
return newKibana()
})
}

plugins/inputs/kibana/kibana_test.go
@@ -0,0 +1,87 @@
package kibana
import (
"io"
"net/http"
"strings"
"testing"
"github.com/influxdata/telegraf/testutil"
)
func defaultTags6_3() map[string]string {
return map[string]string{
"name": "my-kibana",
"source": "example.com:5601",
"version": "6.3.2",
"status": "green",
}
}
func defaultTags6_5() map[string]string {
return map[string]string{
"name": "my-kibana",
"source": "example.com:5601",
"version": "6.5.4",
"status": "green",
}
}
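// transportMock is a stub http.RoundTripper returning a canned status code
// and body, so the tests can exercise Gather without a live Kibana server.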
type transportMock struct {
statusCode int
body string
}
func newTransportMock(statusCode int, body string) http.RoundTripper {
return &transportMock{
statusCode: statusCode,
body: body,
}
}
func (t *transportMock) RoundTrip(r *http.Request) (*http.Response, error) {
res := &http.Response{
Header: make(http.Header),
Request: r,
StatusCode: t.statusCode,
}
res.Header.Set("Content-Type", "application/json")
res.Body = io.NopCloser(strings.NewReader(t.body))
return res, nil
}
func checkKibanaStatusResult(version string, t *testing.T, acc *testutil.Accumulator) {
if version == "6.3.2" {
tags := defaultTags6_3()
acc.AssertContainsTaggedFields(t, "kibana", kibanaStatusExpected6_3, tags)
} else {
tags := defaultTags6_5()
acc.AssertContainsTaggedFields(t, "kibana", kibanaStatusExpected6_5, tags)
}
}
func TestGather(t *testing.T) {
ks := newKibanaWithClient()
ks.Servers = []string{"http://example.com:5601"}
// Unit test for Kibana version < 6.4
ks.client.Transport = newTransportMock(http.StatusOK, kibanaStatusResponse6_3)
var acc1 testutil.Accumulator
if err := acc1.GatherError(ks.Gather); err != nil {
t.Fatal(err)
}
checkKibanaStatusResult(defaultTags6_3()["version"], t, &acc1)
// Unit test for Kibana version >= 6.4
ks.client.Transport = newTransportMock(http.StatusOK, kibanaStatusResponse6_5)
var acc2 testutil.Accumulator
if err := acc2.GatherError(ks.Gather); err != nil {
t.Fatal(err)
}
checkKibanaStatusResult(defaultTags6_5()["version"], t, &acc2)
}
func newKibanaWithClient() *Kibana {
ks := newKibana()
ks.client = &http.Client{}
return ks
}

plugins/inputs/kibana/sample.conf
@@ -0,0 +1,25 @@
# Read status information from one or more Kibana servers
[[inputs.kibana]]
## Specify a list of one or more Kibana servers
servers = ["http://localhost:5601"]
## Timeout for HTTP requests
timeout = "5s"
## HTTP Basic Auth credentials
# username = "username"
# password = "pa$$word"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## If 'use_system_proxy' is set to true, Telegraf will check env vars such as
## HTTP_PROXY, HTTPS_PROXY, and NO_PROXY (or their lowercase counterparts).
## If 'use_system_proxy' is set to false (default) and 'http_proxy_url' is
## provided, Telegraf will use the specified URL as HTTP proxy.
# use_system_proxy = false
# http_proxy_url = "http://localhost:8888"

plugins/inputs/kibana/test_environment/basic_kibana_telegraf.conf
@@ -0,0 +1,75 @@
# Telegraf Configuration for basic Kibana example
# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "10s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
## Telegraf will send metrics to outputs in batches of at most
## metric_batch_size metrics.
## This controls the size of writes that Telegraf sends to output plugins.
metric_batch_size = 1000
## Maximum number of unwritten metrics per output. Increasing this value
## allows for longer periods of output downtime without dropping metrics at the
## cost of higher maximum memory usage.
metric_buffer_limit = 10000
## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"
## Default flushing interval for all outputs. Maximum flush_interval will be
## flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## By default or when set to "0s", precision will be set to the same
## timestamp order as the collection interval, with the maximum being 1s.
## ie, when interval = "10s", precision will be "1s"
## when interval = "250ms", precision will be "1ms"
## Precision will NOT be used for service inputs. It is up to each individual
## service input to set the timestamp at the appropriate precision.
## Valid time units are "ns", "us" (or "µs"), "ms", "s".
precision = ""
## Override default hostname, if empty use os.Hostname()
hostname = ""
## If set to true, do not set the "host" tag in the telegraf agent.
omit_hostname = false
###############################################################################
# OUTPUT PLUGINS #
###############################################################################
# Send telegraf metrics to file(s)
[[outputs.file]]
## Files to write to, "stdout" is a specially handled file.
files = ["stdout", "/tmp/metrics.out"]
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
###############################################################################
# INPUT PLUGINS #
###############################################################################
# Read status information from one or more Kibana servers
[[inputs.kibana]]
## Specify a list of one or more Kibana servers
servers = ["http://kib01:5601"]
## Timeout for HTTP requests
timeout = "5s"

plugins/inputs/kibana/test_environment/docker-compose.yml
@@ -0,0 +1,48 @@
## Reference: https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#docker-cli-run-dev-mode
version: '2.2'
services:
es01:
image: docker.elastic.co/elasticsearch/elasticsearch:7.10.1
container_name: es01
environment:
- node.name=es01
- cluster.name=es-docker-cluster
- cluster.initial_master_nodes=es01
- bootstrap.memory_lock=true
- "ES_JAVA_OPTS=-Xms512m -Xmx512m"
ulimits:
memlock:
soft: -1
hard: -1
volumes:
- data01:/usr/share/elasticsearch/data
ports:
- 9200:9200
networks:
- elastic
kib01:
image: docker.elastic.co/kibana/kibana:7.10.1
container_name: kib01
ports:
- 5601:5601
environment:
ELASTICSEARCH_URL: http://es01:9200
ELASTICSEARCH_HOSTS: http://es01:9200
networks:
- elastic
telegraf:
image: local_telegraf
volumes:
- ./basic_kibana_telegraf.conf:/etc/telegraf/telegraf.conf:ro
networks:
- elastic
volumes:
data01:
driver: local
networks:
elastic:
driver: bridge

plugins/inputs/kibana/test_environment/run_test_env.sh
@@ -0,0 +1,5 @@
#!/bin/sh
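# Build a local Telegraf image, then bring up Elasticsearch, Kibana, and Telegraf.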
docker build -t local_telegraf -f scripts/alpine.docker .
docker-compose -f plugins/inputs/kibana/test_environment/docker-compose.yml up

plugins/inputs/kibana/testdata_test6_3.go
@@ -0,0 +1,200 @@
package kibana
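// kibanaStatusResponse6_3 is a captured /api/status payload from a Kibana
// 6.3.2 instance (pre-6.4 heap layout under process.mem).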
const kibanaStatusResponse6_3 = `
{
"name": "my-kibana",
"uuid": "00000000-0000-0000-0000-000000000000",
"version": {
"number": "6.3.2",
"build_hash": "53d0c6758ac3fb38a3a1df198c1d4c87765e63f7",
"build_number": 17307,
"build_snapshot": false
},
"status": {
"overall": {
"state": "green",
"title": "Green",
"nickname": "Looking good",
"icon": "success",
"since": "2018-07-27T07:37:42.567Z"
},
"statuses": [{
"id": "plugin:kibana@6.3.2",
"state": "green",
"icon": "success",
"message": "Ready",
"since": "2018-07-27T07:37:42.567Z"
},
{
"id": "plugin:elasticsearch@6.3.2",
"state": "green",
"icon": "success",
"message": "Ready",
"since": "2018-07-28T10:07:04.920Z"
},
{
"id": "plugin:xpack_main@6.3.2",
"state": "green",
"icon": "success",
"message": "Ready",
"since": "2018-07-28T10:07:02.393Z"
},
{
"id": "plugin:searchprofiler@6.3.2",
"state": "green",
"icon": "success",
"message": "Ready",
"since": "2018-07-28T10:07:02.395Z"
},
{
"id": "plugin:tilemap@6.3.2",
"state": "green",
"icon": "success",
"message": "Ready",
"since": "2018-07-28T10:07:02.396Z"
},
{
"id": "plugin:watcher@6.3.2",
"state": "green",
"icon": "success",
"message": "Ready",
"since": "2018-07-28T10:07:02.397Z"
},
{
"id": "plugin:license_management@6.3.2",
"state": "green",
"icon": "success",
"message": "Ready",
"since": "2018-07-27T07:37:42.668Z"
},
{
"id": "plugin:index_management@6.3.2",
"state": "green",
"icon": "success",
"message": "Ready",
"since": "2018-07-28T10:07:02.399Z"
},
{
"id": "plugin:timelion@6.3.2",
"state": "green",
"icon": "success",
"message": "Ready",
"since": "2018-07-27T07:37:42.912Z"
},
{
"id": "plugin:logtrail@0.1.29",
"state": "green",
"icon": "success",
"message": "Ready",
"since": "2018-07-27T07:37:42.919Z"
},
{
"id": "plugin:monitoring@6.3.2",
"state": "green",
"icon": "success",
"message": "Ready",
"since": "2018-07-27T07:37:42.922Z"
},
{
"id": "plugin:grokdebugger@6.3.2",
"state": "green",
"icon": "success",
"message": "Ready",
"since": "2018-07-28T10:07:02.400Z"
},
{
"id": "plugin:dashboard_mode@6.3.2",
"state": "green",
"icon": "success",
"message": "Ready",
"since": "2018-07-27T07:37:42.928Z"
},
{
"id": "plugin:logstash@6.3.2",
"state": "green",
"icon": "success",
"message": "Ready",
"since": "2018-07-28T10:07:02.401Z"
},
{
"id": "plugin:apm@6.3.2",
"state": "green",
"icon": "success",
"message": "Ready",
"since": "2018-07-27T07:37:42.950Z"
},
{
"id": "plugin:console@6.3.2",
"state": "green",
"icon": "success",
"message": "Ready",
"since": "2018-07-27T07:37:42.958Z"
},
{
"id": "plugin:console_extensions@6.3.2",
"state": "green",
"icon": "success",
"message": "Ready",
"since": "2018-07-27T07:37:42.961Z"
},
{
"id": "plugin:metrics@6.3.2",
"state": "green",
"icon": "success",
"message": "Ready",
"since": "2018-07-27T07:37:42.965Z"
},
{
"id": "plugin:reporting@6.3.2",
"state": "green",
"icon": "success",
"message": "Ready",
"since": "2018-07-28T10:07:02.402Z"
}]
},
"metrics": {
"last_updated": "2018-08-21T11:24:25.823Z",
"collection_interval_in_millis": 5000,
"uptime_in_millis": 2173595336,
"process": {
"mem": {
"heap_max_in_bytes": 149954560,
"heap_used_in_bytes": 126274392
}
},
"os": {
"cpu": {
"load_average": {
"1m": 0.1806640625,
"5m": 0.49658203125,
"15m": 0.458984375
}
}
},
"response_times": {
"avg_in_millis": 12.5,
"max_in_millis": 123
},
"requests": {
"total": 2,
"disconnects": 0,
"status_codes": {
"200": 2
}
},
"concurrent_connections": 10
}
}
`
var kibanaStatusExpected6_3 = map[string]interface{}{
"status_code": 1,
"heap_total_bytes": int64(149954560),
"heap_max_bytes": int64(149954560),
"heap_used_bytes": int64(126274392),
"uptime_ms": int64(2173595336),
"response_time_avg_ms": float64(12.5),
"response_time_max_ms": int64(123),
"concurrent_connections": int64(10),
"requests_per_sec": float64(0.4),
}

plugins/inputs/kibana/testdata_test6_5.go
@@ -0,0 +1,228 @@
package kibana
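// kibanaStatusResponse6_5 is a captured /api/status payload from a Kibana
// 6.5.4 instance (6.4+ heap layout under process.memory.heap).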
const kibanaStatusResponse6_5 = `
{
"name": "my-kibana",
"uuid": "00000000-0000-0000-0000-000000000000",
"version": {
"number": "6.5.4",
"build_hash": "53d0c6758ac3fb38a3a1df198c1d4c87765e63f7",
"build_number": 17307,
"build_snapshot": false
},
"status": {
"overall": {
"state": "green",
"title": "Green",
"nickname": "Looking good",
"icon": "success",
"since": "2018-07-27T07:37:42.567Z"
},
"statuses": [{
"id": "plugin:kibana@6.5.4",
"state": "green",
"icon": "success",
"message": "Ready",
"since": "2018-07-27T07:37:42.567Z"
},
{
"id": "plugin:elasticsearch@6.5.4",
"state": "green",
"icon": "success",
"message": "Ready",
"since": "2018-07-28T10:07:04.920Z"
},
{
"id": "plugin:xpack_main@6.5.4",
"state": "green",
"icon": "success",
"message": "Ready",
"since": "2018-07-28T10:07:02.393Z"
},
{
"id": "plugin:searchprofiler@6.5.4",
"state": "green",
"icon": "success",
"message": "Ready",
"since": "2018-07-28T10:07:02.395Z"
},
{
"id": "plugin:tilemap@6.5.4",
"state": "green",
"icon": "success",
"message": "Ready",
"since": "2018-07-28T10:07:02.396Z"
},
{
"id": "plugin:watcher@6.5.4",
"state": "green",
"icon": "success",
"message": "Ready",
"since": "2018-07-28T10:07:02.397Z"
},
{
"id": "plugin:license_management@6.5.4",
"state": "green",
"icon": "success",
"message": "Ready",
"since": "2018-07-27T07:37:42.668Z"
},
{
"id": "plugin:index_management@6.5.4",
"state": "green",
"icon": "success",
"message": "Ready",
"since": "2018-07-28T10:07:02.399Z"
},
{
"id": "plugin:timelion@6.5.4",
"state": "green",
"icon": "success",
"message": "Ready",
"since": "2018-07-27T07:37:42.912Z"
},
{
"id": "plugin:logtrail@0.1.29",
"state": "green",
"icon": "success",
"message": "Ready",
"since": "2018-07-27T07:37:42.919Z"
},
{
"id": "plugin:monitoring@6.5.4",
"state": "green",
"icon": "success",
"message": "Ready",
"since": "2018-07-27T07:37:42.922Z"
},
{
"id": "plugin:grokdebugger@6.5.4",
"state": "green",
"icon": "success",
"message": "Ready",
"since": "2018-07-28T10:07:02.400Z"
},
{
"id": "plugin:dashboard_mode@6.5.4",
"state": "green",
"icon": "success",
"message": "Ready",
"since": "2018-07-27T07:37:42.928Z"
},
{
"id": "plugin:logstash@6.5.4",
"state": "green",
"icon": "success",
"message": "Ready",
"since": "2018-07-28T10:07:02.401Z"
},
{
"id": "plugin:apm@6.5.4",
"state": "green",
"icon": "success",
"message": "Ready",
"since": "2018-07-27T07:37:42.950Z"
},
{
"id": "plugin:console@6.5.4",
"state": "green",
"icon": "success",
"message": "Ready",
"since": "2018-07-27T07:37:42.958Z"
},
{
"id": "plugin:console_extensions@6.5.4",
"state": "green",
"icon": "success",
"message": "Ready",
"since": "2018-07-27T07:37:42.961Z"
},
{
"id": "plugin:metrics@6.5.4",
"state": "green",
"icon": "success",
"message": "Ready",
"since": "2018-07-27T07:37:42.965Z"
},
{
"id": "plugin:reporting@6.5.4",
"state": "green",
"icon": "success",
"message": "Ready",
"since": "2018-07-28T10:07:02.402Z"
}]
},
"metrics": {
"last_updated": "2020-01-15T09:40:17.733Z",
"collection_interval_in_millis": 5000,
"process": {
"memory": {
"heap": {
"total_in_bytes": 149954560,
"used_in_bytes": 126274392,
"size_limit": 1501560832
},
"resident_set_size_in_bytes": 286650368
},
"event_loop_delay": 0.5314235687255859,
"pid": 6,
"uptime_in_millis": 2173595336.9999999998
},
"os": {
"load": {
"1m": 2.66015625,
"5m": 2.8173828125,
"15m": 2.51025390625
},
"memory": {
"total_in_bytes": 404355756032,
"free_in_bytes": 294494244864,
"used_in_bytes": 109861511168
},
"uptime_in_millis": 8220745000,
"cgroup": {
"cpuacct": {
"control_group": "/",
"usage_nanos": 1086527218898
},
"cpu": {
"control_group": "/",
"cfs_period_micros": 100000,
"cfs_quota_micros": -1,
"stat": {
"number_of_elapsed_periods": 0,
"number_of_times_throttled": 0,
"time_throttled_nanos": 0
}
}
}
},
"response_times": {
"avg_in_millis": 12.5,
"max_in_millis": 123
},
"requests": {
"total": 2,
"disconnects": 0,
"status_codes": {
"200": 1,
"304": 1
}
},
"concurrent_connections": 10
}
}
`
var kibanaStatusExpected6_5 = map[string]interface{}{
"status_code": 1,
"heap_total_bytes": int64(149954560),
"heap_max_bytes": int64(149954560),
"heap_used_bytes": int64(126274392),
"heap_size_limit": int64(1501560832),
"uptime_ms": int64(2173595337),
"response_time_avg_ms": float64(12.5),
"response_time_max_ms": int64(123),
"concurrent_connections": int64(10),
"requests_per_sec": float64(0.4),
}