Adding upstream version 1.34.4.
Signed-off-by: Daniel Baumann <daniel@debian.org>
This commit is contained in:
parent
e393c3af3f
commit
4978089aab
4963 changed files with 677545 additions and 0 deletions
888
plugins/inputs/elasticsearch/README.md
Normal file
888
plugins/inputs/elasticsearch/README.md
Normal file
|
@ -0,0 +1,888 @@
|
|||
# Elasticsearch Input Plugin
|
||||
|
||||
This plugin queries endpoints of an [Elasticsearch][elastic] instance to obtain
|
||||
[node statistics][node_stats] and optionally [cluster-health][cluster_health]
|
||||
metrics.
|
||||
Additionally, the plugin is able to query [cluster][cluster_stats],
|
||||
[indices and shard][indices_stats] statistics for the master node.
|
||||
|
||||
> [!NOTE]
|
||||
> Specific statistics information can change between Elasticsearch versions. In
|
||||
> general, this plugin attempts to stay as version-generic as possible by
|
||||
> tagging high-level categories only and creating unique field names of
|
||||
> whatever statistics names are provided at the mid-low level.
|
||||
|
||||
⭐ Telegraf v0.1.5
|
||||
🏷️ server
|
||||
💻 all
|
||||
|
||||
[elastic]: https://www.elastic.co/
|
||||
[node_stats]: https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html
|
||||
[cluster_health]: https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html
|
||||
[cluster_stats]: https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html
|
||||
[indices_stats]: https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-stats.html
|
||||
|
||||
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
|
||||
|
||||
In addition to the plugin-specific configuration settings, plugins support
|
||||
additional global and plugin configuration settings. These settings are used to
|
||||
modify metrics, tags, and fields, or create aliases and configure ordering, etc.
|
||||
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
|
||||
|
||||
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
|
||||
|
||||
## Configuration
|
||||
|
||||
```toml @sample.conf
|
||||
# Read stats from one or more Elasticsearch servers or clusters
|
||||
[[inputs.elasticsearch]]
|
||||
## specify a list of one or more Elasticsearch servers
|
||||
## you can add username and password to your url to use basic authentication:
|
||||
## servers = ["http://user:pass@localhost:9200"]
|
||||
servers = ["http://localhost:9200"]
|
||||
|
||||
## HTTP headers to send with each request
|
||||
# headers = { "X-Custom-Header" = "Custom" }
|
||||
|
||||
## When local is true (the default), the node will read only its own stats.
|
||||
## Set local to false when you want to read the node stats from all nodes
|
||||
## of the cluster.
|
||||
local = true
|
||||
|
||||
## Set cluster_health to true when you want to obtain cluster health stats
|
||||
cluster_health = false
|
||||
|
||||
## Adjust cluster_health_level when you want to obtain detailed health stats
|
||||
## The options are
|
||||
## - indices (default)
|
||||
## - cluster
|
||||
# cluster_health_level = "indices"
|
||||
|
||||
## Set cluster_stats to true when you want to obtain cluster stats.
|
||||
cluster_stats = false
|
||||
|
||||
## Only gather cluster_stats from the master node.
|
||||
## For this to work, 'local' must be set to true
|
||||
cluster_stats_only_from_master = true
|
||||
|
||||
## Gather stats from the enrich API
|
||||
# enrich_stats = false
|
||||
|
||||
## Indices to collect; can be one or more index names or _all
|
||||
## Use of wildcards is allowed. Use a wildcard at the end to retrieve index
|
||||
## names that end with a changing value, like a date.
|
||||
indices_include = ["_all"]
|
||||
|
||||
## One of "shards", "cluster", "indices"
|
||||
## Currently only "shards" is implemented
|
||||
indices_level = "shards"
|
||||
|
||||
## node_stats is a list of sub-stats that you want to have gathered.
|
||||
## Valid options are "indices", "os", "process", "jvm", "thread_pool",
|
||||
## "fs", "transport", "http", "breaker". Per default, all stats are gathered.
|
||||
# node_stats = ["jvm", "http"]
|
||||
|
||||
## HTTP Basic Authentication username and password.
|
||||
# username = ""
|
||||
# password = ""
|
||||
|
||||
## Optional TLS Config
|
||||
# tls_ca = "/etc/telegraf/ca.pem"
|
||||
# tls_cert = "/etc/telegraf/cert.pem"
|
||||
# tls_key = "/etc/telegraf/key.pem"
|
||||
## Use TLS but skip chain & host verification
|
||||
# insecure_skip_verify = false
|
||||
|
||||
## If 'use_system_proxy' is set to true, Telegraf will check env vars such as
|
||||
## HTTP_PROXY, HTTPS_PROXY, and NO_PROXY (or their lowercase counterparts).
|
||||
## If 'use_system_proxy' is set to false (default) and 'http_proxy_url' is
|
||||
## provided, Telegraf will use the specified URL as HTTP proxy.
|
||||
# use_system_proxy = false
|
||||
# http_proxy_url = "http://localhost:8888"
|
||||
|
||||
## Sets the number of most recent indices to return for indices that are
|
||||
## configured with a date-stamped suffix. Each 'indices_include' entry
|
||||
## ending with a wildcard (*) or glob matching pattern will group together
|
||||
## all indices that match it, and sort them by the date or number after
|
||||
## the wildcard. Metrics then are gathered for only the
|
||||
## 'num_most_recent_indices' amount of most recent indices.
|
||||
# num_most_recent_indices = 0
|
||||
```
|
||||
|
||||
## Metrics
|
||||
|
||||
Emitted when `cluster_health = true`:
|
||||
|
||||
- elasticsearch_cluster_health
|
||||
- tags:
|
||||
- name
|
||||
- fields:
|
||||
- active_primary_shards (integer)
|
||||
- active_shards (integer)
|
||||
- active_shards_percent_as_number (float)
|
||||
- delayed_unassigned_shards (integer)
|
||||
- initializing_shards (integer)
|
||||
- number_of_data_nodes (integer)
|
||||
- number_of_in_flight_fetch (integer)
|
||||
- number_of_nodes (integer)
|
||||
- number_of_pending_tasks (integer)
|
||||
- relocating_shards (integer)
|
||||
- status (string, one of green, yellow or red)
|
||||
- status_code (integer, green = 1, yellow = 2, red = 3),
|
||||
- task_max_waiting_in_queue_millis (integer)
|
||||
- timed_out (boolean)
|
||||
- unassigned_shards (integer)
|
||||
|
||||
Emitted when `cluster_health = true` and `cluster_health_level = "indices"`:
|
||||
|
||||
- elasticsearch_cluster_health_indices
|
||||
- tags:
|
||||
- index
|
||||
- name
|
||||
- fields:
|
||||
- active_primary_shards (integer)
|
||||
- active_shards (integer)
|
||||
- initializing_shards (integer)
|
||||
- number_of_replicas (integer)
|
||||
- number_of_shards (integer)
|
||||
- relocating_shards (integer)
|
||||
- status (string, one of green, yellow or red)
|
||||
- status_code (integer, green = 1, yellow = 2, red = 3),
|
||||
- unassigned_shards (integer)
|
||||
|
||||
Emitted when `cluster_stats = true`:
|
||||
|
||||
- elasticsearch_clusterstats_indices
|
||||
- tags:
|
||||
- cluster_name
|
||||
- node_name
|
||||
- status
|
||||
- fields:
|
||||
- completion_size_in_bytes (float)
|
||||
- count (float)
|
||||
- docs_count (float)
|
||||
- docs_deleted (float)
|
||||
- fielddata_evictions (float)
|
||||
- fielddata_memory_size_in_bytes (float)
|
||||
- query_cache_cache_count (float)
|
||||
- query_cache_cache_size (float)
|
||||
- query_cache_evictions (float)
|
||||
- query_cache_hit_count (float)
|
||||
- query_cache_memory_size_in_bytes (float)
|
||||
- query_cache_miss_count (float)
|
||||
- query_cache_total_count (float)
|
||||
- segments_count (float)
|
||||
- segments_doc_values_memory_in_bytes (float)
|
||||
- segments_fixed_bit_set_memory_in_bytes (float)
|
||||
- segments_index_writer_memory_in_bytes (float)
|
||||
- segments_max_unsafe_auto_id_timestamp (float)
|
||||
- segments_memory_in_bytes (float)
|
||||
- segments_norms_memory_in_bytes (float)
|
||||
- segments_points_memory_in_bytes (float)
|
||||
- segments_stored_fields_memory_in_bytes (float)
|
||||
- segments_term_vectors_memory_in_bytes (float)
|
||||
- segments_terms_memory_in_bytes (float)
|
||||
- segments_version_map_memory_in_bytes (float)
|
||||
- shards_index_primaries_avg (float)
|
||||
- shards_index_primaries_max (float)
|
||||
- shards_index_primaries_min (float)
|
||||
- shards_index_replication_avg (float)
|
||||
- shards_index_replication_max (float)
|
||||
- shards_index_replication_min (float)
|
||||
- shards_index_shards_avg (float)
|
||||
- shards_index_shards_max (float)
|
||||
- shards_index_shards_min (float)
|
||||
- shards_primaries (float)
|
||||
- shards_replication (float)
|
||||
- shards_total (float)
|
||||
- store_size_in_bytes (float)
|
||||
|
||||
- elasticsearch_clusterstats_nodes
|
||||
- tags:
|
||||
- cluster_name
|
||||
- node_name
|
||||
- status
|
||||
- fields:
|
||||
- count_coordinating_only (float)
|
||||
- count_data (float)
|
||||
- count_ingest (float)
|
||||
- count_master (float)
|
||||
- count_total (float)
|
||||
- fs_available_in_bytes (float)
|
||||
- fs_free_in_bytes (float)
|
||||
- fs_total_in_bytes (float)
|
||||
- jvm_max_uptime_in_millis (float)
|
||||
- jvm_mem_heap_max_in_bytes (float)
|
||||
- jvm_mem_heap_used_in_bytes (float)
|
||||
- jvm_threads (float)
|
||||
- jvm_versions_0_count (float)
|
||||
- jvm_versions_0_version (string)
|
||||
- jvm_versions_0_vm_name (string)
|
||||
- jvm_versions_0_vm_vendor (string)
|
||||
- jvm_versions_0_vm_version (string)
|
||||
- network_types_http_types_security4 (float)
|
||||
- network_types_transport_types_security4 (float)
|
||||
- os_allocated_processors (float)
|
||||
- os_available_processors (float)
|
||||
- os_mem_free_in_bytes (float)
|
||||
- os_mem_free_percent (float)
|
||||
- os_mem_total_in_bytes (float)
|
||||
- os_mem_used_in_bytes (float)
|
||||
- os_mem_used_percent (float)
|
||||
- os_names_0_count (float)
|
||||
- os_names_0_name (string)
|
||||
- os_pretty_names_0_count (float)
|
||||
- os_pretty_names_0_pretty_name (string)
|
||||
- process_cpu_percent (float)
|
||||
- process_open_file_descriptors_avg (float)
|
||||
- process_open_file_descriptors_max (float)
|
||||
- process_open_file_descriptors_min (float)
|
||||
- versions_0 (string)
|
||||
|
||||
Emitted when the appropriate `node_stats` options are set.
|
||||
|
||||
- elasticsearch_transport
|
||||
- tags:
|
||||
- cluster_name
|
||||
- node_attribute_ml.enabled
|
||||
- node_attribute_ml.machine_memory
|
||||
- node_attribute_ml.max_open_jobs
|
||||
- node_attribute_xpack.installed
|
||||
- node_host
|
||||
- node_id
|
||||
- node_name
|
||||
- fields:
|
||||
- rx_count (float)
|
||||
- rx_size_in_bytes (float)
|
||||
- server_open (float)
|
||||
- tx_count (float)
|
||||
- tx_size_in_bytes (float)
|
||||
|
||||
- elasticsearch_breakers
|
||||
- tags:
|
||||
- cluster_name
|
||||
- node_attribute_ml.enabled
|
||||
- node_attribute_ml.machine_memory
|
||||
- node_attribute_ml.max_open_jobs
|
||||
- node_attribute_xpack.installed
|
||||
- node_host
|
||||
- node_id
|
||||
- node_name
|
||||
- fields:
|
||||
- accounting_estimated_size_in_bytes (float)
|
||||
- accounting_limit_size_in_bytes (float)
|
||||
- accounting_overhead (float)
|
||||
- accounting_tripped (float)
|
||||
- fielddata_estimated_size_in_bytes (float)
|
||||
- fielddata_limit_size_in_bytes (float)
|
||||
- fielddata_overhead (float)
|
||||
- fielddata_tripped (float)
|
||||
- in_flight_requests_estimated_size_in_bytes (float)
|
||||
- in_flight_requests_limit_size_in_bytes (float)
|
||||
- in_flight_requests_overhead (float)
|
||||
- in_flight_requests_tripped (float)
|
||||
- parent_estimated_size_in_bytes (float)
|
||||
- parent_limit_size_in_bytes (float)
|
||||
- parent_overhead (float)
|
||||
- parent_tripped (float)
|
||||
- request_estimated_size_in_bytes (float)
|
||||
- request_limit_size_in_bytes (float)
|
||||
- request_overhead (float)
|
||||
- request_tripped (float)
|
||||
|
||||
- elasticsearch_fs
|
||||
- tags:
|
||||
- cluster_name
|
||||
- node_attribute_ml.enabled
|
||||
- node_attribute_ml.machine_memory
|
||||
- node_attribute_ml.max_open_jobs
|
||||
- node_attribute_xpack.installed
|
||||
- node_host
|
||||
- node_id
|
||||
- node_name
|
||||
- fields:
|
||||
- data_0_available_in_bytes (float)
|
||||
- data_0_free_in_bytes (float)
|
||||
- data_0_total_in_bytes (float)
|
||||
- io_stats_devices_0_operations (float)
|
||||
- io_stats_devices_0_read_kilobytes (float)
|
||||
- io_stats_devices_0_read_operations (float)
|
||||
- io_stats_devices_0_write_kilobytes (float)
|
||||
- io_stats_devices_0_write_operations (float)
|
||||
- io_stats_total_operations (float)
|
||||
- io_stats_total_read_kilobytes (float)
|
||||
- io_stats_total_read_operations (float)
|
||||
- io_stats_total_write_kilobytes (float)
|
||||
- io_stats_total_write_operations (float)
|
||||
- timestamp (float)
|
||||
- total_available_in_bytes (float)
|
||||
- total_free_in_bytes (float)
|
||||
- total_total_in_bytes (float)
|
||||
|
||||
- elasticsearch_http
|
||||
- tags:
|
||||
- cluster_name
|
||||
- node_attribute_ml.enabled
|
||||
- node_attribute_ml.machine_memory
|
||||
- node_attribute_ml.max_open_jobs
|
||||
- node_attribute_xpack.installed
|
||||
- node_host
|
||||
- node_id
|
||||
- node_name
|
||||
- fields:
|
||||
- current_open (float)
|
||||
- total_opened (float)
|
||||
|
||||
- elasticsearch_indices
|
||||
- tags:
|
||||
- cluster_name
|
||||
- node_attribute_ml.enabled
|
||||
- node_attribute_ml.machine_memory
|
||||
- node_attribute_ml.max_open_jobs
|
||||
- node_attribute_xpack.installed
|
||||
- node_host
|
||||
- node_id
|
||||
- node_name
|
||||
- fields:
|
||||
- completion_size_in_bytes (float)
|
||||
- docs_count (float)
|
||||
- docs_deleted (float)
|
||||
- fielddata_evictions (float)
|
||||
- fielddata_memory_size_in_bytes (float)
|
||||
- flush_periodic (float)
|
||||
- flush_total (float)
|
||||
- flush_total_time_in_millis (float)
|
||||
- get_current (float)
|
||||
- get_exists_time_in_millis (float)
|
||||
- get_exists_total (float)
|
||||
- get_missing_time_in_millis (float)
|
||||
- get_missing_total (float)
|
||||
- get_time_in_millis (float)
|
||||
- get_total (float)
|
||||
- indexing_delete_current (float)
|
||||
- indexing_delete_time_in_millis (float)
|
||||
- indexing_delete_total (float)
|
||||
- indexing_index_current (float)
|
||||
- indexing_index_failed (float)
|
||||
- indexing_index_time_in_millis (float)
|
||||
- indexing_index_total (float)
|
||||
- indexing_noop_update_total (float)
|
||||
- indexing_throttle_time_in_millis (float)
|
||||
- merges_current (float)
|
||||
- merges_current_docs (float)
|
||||
- merges_current_size_in_bytes (float)
|
||||
- merges_total (float)
|
||||
- merges_total_auto_throttle_in_bytes (float)
|
||||
- merges_total_docs (float)
|
||||
- merges_total_size_in_bytes (float)
|
||||
- merges_total_stopped_time_in_millis (float)
|
||||
- merges_total_throttled_time_in_millis (float)
|
||||
- merges_total_time_in_millis (float)
|
||||
- query_cache_cache_count (float)
|
||||
- query_cache_cache_size (float)
|
||||
- query_cache_evictions (float)
|
||||
- query_cache_hit_count (float)
|
||||
- query_cache_memory_size_in_bytes (float)
|
||||
- query_cache_miss_count (float)
|
||||
- query_cache_total_count (float)
|
||||
- recovery_current_as_source (float)
|
||||
- recovery_current_as_target (float)
|
||||
- recovery_throttle_time_in_millis (float)
|
||||
- refresh_listeners (float)
|
||||
- refresh_total (float)
|
||||
- refresh_total_time_in_millis (float)
|
||||
- request_cache_evictions (float)
|
||||
- request_cache_hit_count (float)
|
||||
- request_cache_memory_size_in_bytes (float)
|
||||
- request_cache_miss_count (float)
|
||||
- search_fetch_current (float)
|
||||
- search_fetch_time_in_millis (float)
|
||||
- search_fetch_total (float)
|
||||
- search_open_contexts (float)
|
||||
- search_query_current (float)
|
||||
- search_query_time_in_millis (float)
|
||||
- search_query_total (float)
|
||||
- search_scroll_current (float)
|
||||
- search_scroll_time_in_millis (float)
|
||||
- search_scroll_total (float)
|
||||
- search_suggest_current (float)
|
||||
- search_suggest_time_in_millis (float)
|
||||
- search_suggest_total (float)
|
||||
- segments_count (float)
|
||||
- segments_doc_values_memory_in_bytes (float)
|
||||
- segments_fixed_bit_set_memory_in_bytes (float)
|
||||
- segments_index_writer_memory_in_bytes (float)
|
||||
- segments_max_unsafe_auto_id_timestamp (float)
|
||||
- segments_memory_in_bytes (float)
|
||||
- segments_norms_memory_in_bytes (float)
|
||||
- segments_points_memory_in_bytes (float)
|
||||
- segments_stored_fields_memory_in_bytes (float)
|
||||
- segments_term_vectors_memory_in_bytes (float)
|
||||
- segments_terms_memory_in_bytes (float)
|
||||
- segments_version_map_memory_in_bytes (float)
|
||||
- store_size_in_bytes (float)
|
||||
- translog_earliest_last_modified_age (float)
|
||||
- translog_operations (float)
|
||||
- translog_size_in_bytes (float)
|
||||
- translog_uncommitted_operations (float)
|
||||
- translog_uncommitted_size_in_bytes (float)
|
||||
- warmer_current (float)
|
||||
- warmer_total (float)
|
||||
- warmer_total_time_in_millis (float)
|
||||
|
||||
- elasticsearch_jvm
|
||||
- tags:
|
||||
- cluster_name
|
||||
- node_attribute_ml.enabled
|
||||
- node_attribute_ml.machine_memory
|
||||
- node_attribute_ml.max_open_jobs
|
||||
- node_attribute_xpack.installed
|
||||
- node_host
|
||||
- node_id
|
||||
- node_name
|
||||
- fields:
|
||||
- buffer_pools_direct_count (float)
|
||||
- buffer_pools_direct_total_capacity_in_bytes (float)
|
||||
- buffer_pools_direct_used_in_bytes (float)
|
||||
- buffer_pools_mapped_count (float)
|
||||
- buffer_pools_mapped_total_capacity_in_bytes (float)
|
||||
- buffer_pools_mapped_used_in_bytes (float)
|
||||
- classes_current_loaded_count (float)
|
||||
- classes_total_loaded_count (float)
|
||||
- classes_total_unloaded_count (float)
|
||||
- gc_collectors_old_collection_count (float)
|
||||
- gc_collectors_old_collection_time_in_millis (float)
|
||||
- gc_collectors_young_collection_count (float)
|
||||
- gc_collectors_young_collection_time_in_millis (float)
|
||||
- mem_heap_committed_in_bytes (float)
|
||||
- mem_heap_max_in_bytes (float)
|
||||
- mem_heap_used_in_bytes (float)
|
||||
- mem_heap_used_percent (float)
|
||||
- mem_non_heap_committed_in_bytes (float)
|
||||
- mem_non_heap_used_in_bytes (float)
|
||||
- mem_pools_old_max_in_bytes (float)
|
||||
- mem_pools_old_peak_max_in_bytes (float)
|
||||
- mem_pools_old_peak_used_in_bytes (float)
|
||||
- mem_pools_old_used_in_bytes (float)
|
||||
- mem_pools_survivor_max_in_bytes (float)
|
||||
- mem_pools_survivor_peak_max_in_bytes (float)
|
||||
- mem_pools_survivor_peak_used_in_bytes (float)
|
||||
- mem_pools_survivor_used_in_bytes (float)
|
||||
- mem_pools_young_max_in_bytes (float)
|
||||
- mem_pools_young_peak_max_in_bytes (float)
|
||||
- mem_pools_young_peak_used_in_bytes (float)
|
||||
- mem_pools_young_used_in_bytes (float)
|
||||
- threads_count (float)
|
||||
- threads_peak_count (float)
|
||||
- timestamp (float)
|
||||
- uptime_in_millis (float)
|
||||
|
||||
- elasticsearch_os
|
||||
- tags:
|
||||
- cluster_name
|
||||
- node_attribute_ml.enabled
|
||||
- node_attribute_ml.machine_memory
|
||||
- node_attribute_ml.max_open_jobs
|
||||
- node_attribute_xpack.installed
|
||||
- node_host
|
||||
- node_id
|
||||
- node_name
|
||||
- fields:
|
||||
- cgroup_cpu_cfs_period_micros (float)
|
||||
- cgroup_cpu_cfs_quota_micros (float)
|
||||
- cgroup_cpu_stat_number_of_elapsed_periods (float)
|
||||
- cgroup_cpu_stat_number_of_times_throttled (float)
|
||||
- cgroup_cpu_stat_time_throttled_nanos (float)
|
||||
- cgroup_cpuacct_usage_nanos (float)
|
||||
- cpu_load_average_15m (float)
|
||||
- cpu_load_average_1m (float)
|
||||
- cpu_load_average_5m (float)
|
||||
- cpu_percent (float)
|
||||
- mem_free_in_bytes (float)
|
||||
- mem_free_percent (float)
|
||||
- mem_total_in_bytes (float)
|
||||
- mem_used_in_bytes (float)
|
||||
- mem_used_percent (float)
|
||||
- swap_free_in_bytes (float)
|
||||
- swap_total_in_bytes (float)
|
||||
- swap_used_in_bytes (float)
|
||||
- timestamp (float)
|
||||
|
||||
- elasticsearch_process
|
||||
- tags:
|
||||
- cluster_name
|
||||
- node_attribute_ml.enabled
|
||||
- node_attribute_ml.machine_memory
|
||||
- node_attribute_ml.max_open_jobs
|
||||
- node_attribute_xpack.installed
|
||||
- node_host
|
||||
- node_id
|
||||
- node_name
|
||||
- fields:
|
||||
- cpu_percent (float)
|
||||
- cpu_total_in_millis (float)
|
||||
- max_file_descriptors (float)
|
||||
- mem_total_virtual_in_bytes (float)
|
||||
- open_file_descriptors (float)
|
||||
- timestamp (float)
|
||||
|
||||
- elasticsearch_thread_pool
|
||||
- tags:
|
||||
- cluster_name
|
||||
- node_attribute_ml.enabled
|
||||
- node_attribute_ml.machine_memory
|
||||
- node_attribute_ml.max_open_jobs
|
||||
- node_attribute_xpack.installed
|
||||
- node_host
|
||||
- node_id
|
||||
- node_name
|
||||
- fields:
|
||||
- analyze_active (float)
|
||||
- analyze_completed (float)
|
||||
- analyze_largest (float)
|
||||
- analyze_queue (float)
|
||||
- analyze_rejected (float)
|
||||
- analyze_threads (float)
|
||||
- ccr_active (float)
|
||||
- ccr_completed (float)
|
||||
- ccr_largest (float)
|
||||
- ccr_queue (float)
|
||||
- ccr_rejected (float)
|
||||
- ccr_threads (float)
|
||||
- fetch_shard_started_active (float)
|
||||
- fetch_shard_started_completed (float)
|
||||
- fetch_shard_started_largest (float)
|
||||
- fetch_shard_started_queue (float)
|
||||
- fetch_shard_started_rejected (float)
|
||||
- fetch_shard_started_threads (float)
|
||||
- fetch_shard_store_active (float)
|
||||
- fetch_shard_store_completed (float)
|
||||
- fetch_shard_store_largest (float)
|
||||
- fetch_shard_store_queue (float)
|
||||
- fetch_shard_store_rejected (float)
|
||||
- fetch_shard_store_threads (float)
|
||||
- flush_active (float)
|
||||
- flush_completed (float)
|
||||
- flush_largest (float)
|
||||
- flush_queue (float)
|
||||
- flush_rejected (float)
|
||||
- flush_threads (float)
|
||||
- force_merge_active (float)
|
||||
- force_merge_completed (float)
|
||||
- force_merge_largest (float)
|
||||
- force_merge_queue (float)
|
||||
- force_merge_rejected (float)
|
||||
- force_merge_threads (float)
|
||||
- generic_active (float)
|
||||
- generic_completed (float)
|
||||
- generic_largest (float)
|
||||
- generic_queue (float)
|
||||
- generic_rejected (float)
|
||||
- generic_threads (float)
|
||||
- get_active (float)
|
||||
- get_completed (float)
|
||||
- get_largest (float)
|
||||
- get_queue (float)
|
||||
- get_rejected (float)
|
||||
- get_threads (float)
|
||||
- index_active (float)
|
||||
- index_completed (float)
|
||||
- index_largest (float)
|
||||
- index_queue (float)
|
||||
- index_rejected (float)
|
||||
- index_threads (float)
|
||||
- listener_active (float)
|
||||
- listener_completed (float)
|
||||
- listener_largest (float)
|
||||
- listener_queue (float)
|
||||
- listener_rejected (float)
|
||||
- listener_threads (float)
|
||||
- management_active (float)
|
||||
- management_completed (float)
|
||||
- management_largest (float)
|
||||
- management_queue (float)
|
||||
- management_rejected (float)
|
||||
- management_threads (float)
|
||||
- ml_autodetect_active (float)
|
||||
- ml_autodetect_completed (float)
|
||||
- ml_autodetect_largest (float)
|
||||
- ml_autodetect_queue (float)
|
||||
- ml_autodetect_rejected (float)
|
||||
- ml_autodetect_threads (float)
|
||||
- ml_datafeed_active (float)
|
||||
- ml_datafeed_completed (float)
|
||||
- ml_datafeed_largest (float)
|
||||
- ml_datafeed_queue (float)
|
||||
- ml_datafeed_rejected (float)
|
||||
- ml_datafeed_threads (float)
|
||||
- ml_utility_active (float)
|
||||
- ml_utility_completed (float)
|
||||
- ml_utility_largest (float)
|
||||
- ml_utility_queue (float)
|
||||
- ml_utility_rejected (float)
|
||||
- ml_utility_threads (float)
|
||||
- refresh_active (float)
|
||||
- refresh_completed (float)
|
||||
- refresh_largest (float)
|
||||
- refresh_queue (float)
|
||||
- refresh_rejected (float)
|
||||
- refresh_threads (float)
|
||||
- rollup_indexing_active (float)
|
||||
- rollup_indexing_completed (float)
|
||||
- rollup_indexing_largest (float)
|
||||
- rollup_indexing_queue (float)
|
||||
- rollup_indexing_rejected (float)
|
||||
- rollup_indexing_threads (float)
|
||||
- search_active (float)
|
||||
- search_completed (float)
|
||||
- search_largest (float)
|
||||
- search_queue (float)
|
||||
- search_rejected (float)
|
||||
- search_threads (float)
|
||||
- search_throttled_active (float)
|
||||
- search_throttled_completed (float)
|
||||
- search_throttled_largest (float)
|
||||
- search_throttled_queue (float)
|
||||
- search_throttled_rejected (float)
|
||||
- search_throttled_threads (float)
|
||||
- security-token-key_active (float)
|
||||
- security-token-key_completed (float)
|
||||
- security-token-key_largest (float)
|
||||
- security-token-key_queue (float)
|
||||
- security-token-key_rejected (float)
|
||||
- security-token-key_threads (float)
|
||||
- snapshot_active (float)
|
||||
- snapshot_completed (float)
|
||||
- snapshot_largest (float)
|
||||
- snapshot_queue (float)
|
||||
- snapshot_rejected (float)
|
||||
- snapshot_threads (float)
|
||||
- warmer_active (float)
|
||||
- warmer_completed (float)
|
||||
- warmer_largest (float)
|
||||
- warmer_queue (float)
|
||||
- warmer_rejected (float)
|
||||
- warmer_threads (float)
|
||||
- watcher_active (float)
|
||||
- watcher_completed (float)
|
||||
- watcher_largest (float)
|
||||
- watcher_queue (float)
|
||||
- watcher_rejected (float)
|
||||
- watcher_threads (float)
|
||||
- write_active (float)
|
||||
- write_completed (float)
|
||||
- write_largest (float)
|
||||
- write_queue (float)
|
||||
- write_rejected (float)
|
||||
- write_threads (float)
|
||||
|
||||
Emitted when the appropriate `indices_stats` options are set.
|
||||
|
||||
- elasticsearch_indices_stats_(primaries|total)
|
||||
- tags:
|
||||
- index_name
|
||||
- fields:
|
||||
- completion_size_in_bytes (float)
|
||||
- docs_count (float)
|
||||
- docs_deleted (float)
|
||||
- fielddata_evictions (float)
|
||||
- fielddata_memory_size_in_bytes (float)
|
||||
- flush_periodic (float)
|
||||
- flush_total (float)
|
||||
- flush_total_time_in_millis (float)
|
||||
- get_current (float)
|
||||
- get_exists_time_in_millis (float)
|
||||
- get_exists_total (float)
|
||||
- get_missing_time_in_millis (float)
|
||||
- get_missing_total (float)
|
||||
- get_time_in_millis (float)
|
||||
- get_total (float)
|
||||
- indexing_delete_current (float)
|
||||
- indexing_delete_time_in_millis (float)
|
||||
- indexing_delete_total (float)
|
||||
- indexing_index_current (float)
|
||||
- indexing_index_failed (float)
|
||||
- indexing_index_time_in_millis (float)
|
||||
- indexing_index_total (float)
|
||||
- indexing_is_throttled (float)
|
||||
- indexing_noop_update_total (float)
|
||||
- indexing_throttle_time_in_millis (float)
|
||||
- merges_current (float)
|
||||
- merges_current_docs (float)
|
||||
- merges_current_size_in_bytes (float)
|
||||
- merges_total (float)
|
||||
- merges_total_auto_throttle_in_bytes (float)
|
||||
- merges_total_docs (float)
|
||||
- merges_total_size_in_bytes (float)
|
||||
- merges_total_stopped_time_in_millis (float)
|
||||
- merges_total_throttled_time_in_millis (float)
|
||||
- merges_total_time_in_millis (float)
|
||||
- query_cache_cache_count (float)
|
||||
- query_cache_cache_size (float)
|
||||
- query_cache_evictions (float)
|
||||
- query_cache_hit_count (float)
|
||||
- query_cache_memory_size_in_bytes (float)
|
||||
- query_cache_miss_count (float)
|
||||
- query_cache_total_count (float)
|
||||
- recovery_current_as_source (float)
|
||||
- recovery_current_as_target (float)
|
||||
- recovery_throttle_time_in_millis (float)
|
||||
- refresh_external_total (float)
|
||||
- refresh_external_total_time_in_millis (float)
|
||||
- refresh_listeners (float)
|
||||
- refresh_total (float)
|
||||
- refresh_total_time_in_millis (float)
|
||||
- request_cache_evictions (float)
|
||||
- request_cache_hit_count (float)
|
||||
- request_cache_memory_size_in_bytes (float)
|
||||
- request_cache_miss_count (float)
|
||||
- search_fetch_current (float)
|
||||
- search_fetch_time_in_millis (float)
|
||||
- search_fetch_total (float)
|
||||
- search_open_contexts (float)
|
||||
- search_query_current (float)
|
||||
- search_query_time_in_millis (float)
|
||||
- search_query_total (float)
|
||||
- search_scroll_current (float)
|
||||
- search_scroll_time_in_millis (float)
|
||||
- search_scroll_total (float)
|
||||
- search_suggest_current (float)
|
||||
- search_suggest_time_in_millis (float)
|
||||
- search_suggest_total (float)
|
||||
- segments_count (float)
|
||||
- segments_doc_values_memory_in_bytes (float)
|
||||
- segments_fixed_bit_set_memory_in_bytes (float)
|
||||
- segments_index_writer_memory_in_bytes (float)
|
||||
- segments_max_unsafe_auto_id_timestamp (float)
|
||||
- segments_memory_in_bytes (float)
|
||||
- segments_norms_memory_in_bytes (float)
|
||||
- segments_points_memory_in_bytes (float)
|
||||
- segments_stored_fields_memory_in_bytes (float)
|
||||
- segments_term_vectors_memory_in_bytes (float)
|
||||
- segments_terms_memory_in_bytes (float)
|
||||
- segments_version_map_memory_in_bytes (float)
|
||||
- store_size_in_bytes (float)
|
||||
- translog_earliest_last_modified_age (float)
|
||||
- translog_operations (float)
|
||||
- translog_size_in_bytes (float)
|
||||
- translog_uncommitted_operations (float)
|
||||
- translog_uncommitted_size_in_bytes (float)
|
||||
- warmer_current (float)
|
||||
- warmer_total (float)
|
||||
- warmer_total_time_in_millis (float)
|
||||
|
||||
Emitted when the appropriate `shards_stats` options are set.
|
||||
|
||||
- elasticsearch_indices_stats_shards_total
|
||||
- fields:
|
||||
- failed (float)
|
||||
- successful (float)
|
||||
- total (float)
|
||||
|
||||
- elasticsearch_indices_stats_shards
|
||||
- tags:
|
||||
- index_name
|
||||
- node_name
|
||||
- shard_name
|
||||
- type
|
||||
- fields:
|
||||
- commit_generation (float)
|
||||
- commit_num_docs (float)
|
||||
- completion_size_in_bytes (float)
|
||||
- docs_count (float)
|
||||
- docs_deleted (float)
|
||||
- fielddata_evictions (float)
|
||||
- fielddata_memory_size_in_bytes (float)
|
||||
- flush_periodic (float)
|
||||
- flush_total (float)
|
||||
- flush_total_time_in_millis (float)
|
||||
- get_current (float)
|
||||
- get_exists_time_in_millis (float)
|
||||
- get_exists_total (float)
|
||||
- get_missing_time_in_millis (float)
|
||||
- get_missing_total (float)
|
||||
- get_time_in_millis (float)
|
||||
- get_total (float)
|
||||
- indexing_delete_current (float)
|
||||
- indexing_delete_time_in_millis (float)
|
||||
- indexing_delete_total (float)
|
||||
- indexing_index_current (float)
|
||||
- indexing_index_failed (float)
|
||||
- indexing_index_time_in_millis (float)
|
||||
- indexing_index_total (float)
|
||||
- indexing_is_throttled (bool)
|
||||
- indexing_noop_update_total (float)
|
||||
- indexing_throttle_time_in_millis (float)
|
||||
- merges_current (float)
|
||||
- merges_current_docs (float)
|
||||
- merges_current_size_in_bytes (float)
|
||||
- merges_total (float)
|
||||
- merges_total_auto_throttle_in_bytes (float)
|
||||
- merges_total_docs (float)
|
||||
- merges_total_size_in_bytes (float)
|
||||
- merges_total_stopped_time_in_millis (float)
|
||||
- merges_total_throttled_time_in_millis (float)
|
||||
- merges_total_time_in_millis (float)
|
||||
- query_cache_cache_count (float)
|
||||
- query_cache_cache_size (float)
|
||||
- query_cache_evictions (float)
|
||||
- query_cache_hit_count (float)
|
||||
- query_cache_memory_size_in_bytes (float)
|
||||
- query_cache_miss_count (float)
|
||||
- query_cache_total_count (float)
|
||||
- recovery_current_as_source (float)
|
||||
- recovery_current_as_target (float)
|
||||
- recovery_throttle_time_in_millis (float)
|
||||
- refresh_external_total (float)
|
||||
- refresh_external_total_time_in_millis (float)
|
||||
- refresh_listeners (float)
|
||||
- refresh_total (float)
|
||||
- refresh_total_time_in_millis (float)
|
||||
- request_cache_evictions (float)
|
||||
- request_cache_hit_count (float)
|
||||
- request_cache_memory_size_in_bytes (float)
|
||||
- request_cache_miss_count (float)
|
||||
- retention_leases_primary_term (float)
|
||||
- retention_leases_version (float)
|
||||
- routing_state (int)
|
||||
(UNASSIGNED = 1, INITIALIZING = 2, STARTED = 3, RELOCATING = 4, other = 0)
|
||||
- search_fetch_current (float)
|
||||
- search_fetch_time_in_millis (float)
|
||||
- search_fetch_total (float)
|
||||
- search_open_contexts (float)
|
||||
- search_query_current (float)
|
||||
- search_query_time_in_millis (float)
|
||||
- search_query_total (float)
|
||||
- search_scroll_current (float)
|
||||
- search_scroll_time_in_millis (float)
|
||||
- search_scroll_total (float)
|
||||
- search_suggest_current (float)
|
||||
- search_suggest_time_in_millis (float)
|
||||
- search_suggest_total (float)
|
||||
- segments_count (float)
|
||||
- segments_doc_values_memory_in_bytes (float)
|
||||
- segments_fixed_bit_set_memory_in_bytes (float)
|
||||
- segments_index_writer_memory_in_bytes (float)
|
||||
- segments_max_unsafe_auto_id_timestamp (float)
|
||||
- segments_memory_in_bytes (float)
|
||||
- segments_norms_memory_in_bytes (float)
|
||||
- segments_points_memory_in_bytes (float)
|
||||
- segments_stored_fields_memory_in_bytes (float)
|
||||
- segments_term_vectors_memory_in_bytes (float)
|
||||
- segments_terms_memory_in_bytes (float)
|
||||
- segments_version_map_memory_in_bytes (float)
|
||||
- seq_no_global_checkpoint (float)
|
||||
- seq_no_local_checkpoint (float)
|
||||
- seq_no_max_seq_no (float)
|
||||
- shard_path_is_custom_data_path (bool)
|
||||
- store_size_in_bytes (float)
|
||||
- translog_earliest_last_modified_age (float)
|
||||
- translog_operations (float)
|
||||
- translog_size_in_bytes (float)
|
||||
- translog_uncommitted_operations (float)
|
||||
- translog_uncommitted_size_in_bytes (float)
|
||||
- warmer_current (float)
|
||||
- warmer_total (float)
|
||||
- warmer_total_time_in_millis (float)
|
||||
|
||||
## Example Output
|
787
plugins/inputs/elasticsearch/elasticsearch.go
Normal file
787
plugins/inputs/elasticsearch/elasticsearch.go
Normal file
|
@ -0,0 +1,787 @@
|
|||
//go:generate ../../../tools/readme_config_includer/generator
|
||||
package elasticsearch
|
||||
|
||||
import (
|
||||
"context"
|
||||
_ "embed"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/config"
|
||||
"github.com/influxdata/telegraf/filter"
|
||||
common_http "github.com/influxdata/telegraf/plugins/common/http"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
parsers_json "github.com/influxdata/telegraf/plugins/parsers/json"
|
||||
)
|
||||
|
||||
//go:embed sample.conf
|
||||
var sampleConfig string
|
||||
|
||||
// mask for masking username/password from error messages
|
||||
var mask = regexp.MustCompile(`https?:\/\/\S+:\S+@`)
|
||||
|
||||
const (
|
||||
// Node stats are always generated, so simply define a constant for these endpoints
|
||||
statsPath = "/_nodes/stats"
|
||||
statsPathLocal = "/_nodes/_local/stats"
|
||||
)
|
||||
|
||||
// Elasticsearch holds the plugin configuration plus per-run state: the shared
// HTTP client, cached node/master IDs per server, and pre-compiled index-name
// glob matchers (built in Init).
type Elasticsearch struct {
	Local                      bool              `toml:"local"`
	Servers                    []string          `toml:"servers"`
	HTTPHeaders                map[string]string `toml:"headers"`
	HTTPTimeout                config.Duration   `toml:"http_timeout" deprecated:"1.29.0;1.35.0;use 'timeout' instead"`
	ClusterHealth              bool              `toml:"cluster_health"`
	ClusterHealthLevel         string            `toml:"cluster_health_level"`
	ClusterStats               bool              `toml:"cluster_stats"`
	ClusterStatsOnlyFromMaster bool              `toml:"cluster_stats_only_from_master"`
	EnrichStats                bool              `toml:"enrich_stats"`
	IndicesInclude             []string          `toml:"indices_include"`
	IndicesLevel               string            `toml:"indices_level"`
	NodeStats                  []string          `toml:"node_stats"`
	Username                   string            `toml:"username"`
	Password                   string            `toml:"password"`
	NumMostRecentIndices       int               `toml:"num_most_recent_indices"`

	Log telegraf.Logger `toml:"-"`

	client *http.Client
	common_http.HTTPClientConfig

	// serverInfo maps server URL -> node/master IDs; written concurrently in
	// Gather, hence the mutex.
	serverInfo      map[string]serverInfo
	serverInfoMutex sync.Mutex
	indexMatchers   map[string]filter.Filter
}
|
||||
|
||||
// nodeStat mirrors one node entry of the /_nodes/stats response. The stats
// categories are kept as interface{} and flattened generically later, so the
// plugin stays version-agnostic about Elasticsearch's exact field set.
type nodeStat struct {
	Host       string            `json:"host"`
	Name       string            `json:"name"`
	Roles      []string          `json:"roles"`
	Attributes map[string]string `json:"attributes"`
	Indices    interface{}       `json:"indices"`
	OS         interface{}       `json:"os"`
	Process    interface{}       `json:"process"`
	JVM        interface{}       `json:"jvm"`
	ThreadPool interface{}       `json:"thread_pool"`
	FS         interface{}       `json:"fs"`
	Transport  interface{}       `json:"transport"`
	HTTP       interface{}       `json:"http"`
	Breakers   interface{}       `json:"breakers"`
}

// clusterHealth mirrors the /_cluster/health response, including per-index
// health when cluster_health_level is "indices".
type clusterHealth struct {
	ActivePrimaryShards         int                    `json:"active_primary_shards"`
	ActiveShards                int                    `json:"active_shards"`
	ActiveShardsPercentAsNumber float64                `json:"active_shards_percent_as_number"`
	ClusterName                 string                 `json:"cluster_name"`
	DelayedUnassignedShards     int                    `json:"delayed_unassigned_shards"`
	InitializingShards          int                    `json:"initializing_shards"`
	NumberOfDataNodes           int                    `json:"number_of_data_nodes"`
	NumberOfInFlightFetch       int                    `json:"number_of_in_flight_fetch"`
	NumberOfNodes               int                    `json:"number_of_nodes"`
	NumberOfPendingTasks        int                    `json:"number_of_pending_tasks"`
	RelocatingShards            int                    `json:"relocating_shards"`
	Status                      string                 `json:"status"`
	TaskMaxWaitingInQueueMillis int                    `json:"task_max_waiting_in_queue_millis"`
	TimedOut                    bool                   `json:"timed_out"`
	UnassignedShards            int                    `json:"unassigned_shards"`
	Indices                     map[string]indexHealth `json:"indices"`
}

// enrichStats mirrors the /_enrich/_stats response: per-node coordinator
// queue/request counters and enrich cache counters.
type enrichStats struct {
	CoordinatorStats []struct {
		NodeID                string `json:"node_id"`
		QueueSize             int    `json:"queue_size"`
		RemoteRequestsCurrent int    `json:"remote_requests_current"`
		RemoteRequestsTotal   int    `json:"remote_requests_total"`
		ExecutedSearchesTotal int    `json:"executed_searches_total"`
	} `json:"coordinator_stats"`
	CacheStats []struct {
		NodeID    string `json:"node_id"`
		Count     int    `json:"count"`
		Hits      int64  `json:"hits"`
		Misses    int    `json:"misses"`
		Evictions int    `json:"evictions"`
	} `json:"cache_stats"`
}

// indexHealth is the per-index portion of the cluster-health response.
type indexHealth struct {
	ActivePrimaryShards int    `json:"active_primary_shards"`
	ActiveShards        int    `json:"active_shards"`
	InitializingShards  int    `json:"initializing_shards"`
	NumberOfReplicas    int    `json:"number_of_replicas"`
	NumberOfShards      int    `json:"number_of_shards"`
	RelocatingShards    int    `json:"relocating_shards"`
	Status              string `json:"status"`
	UnassignedShards    int    `json:"unassigned_shards"`
}

// clusterStats mirrors the /_cluster/stats response; Indices and Nodes are
// flattened generically into fields.
type clusterStats struct {
	NodeName    string      `json:"node_name"`
	ClusterName string      `json:"cluster_name"`
	Status      string      `json:"status"`
	Indices     interface{} `json:"indices"`
	Nodes       interface{} `json:"nodes"`
}

// indexStat is one index entry of the indices-stats response; Shards is only
// populated when querying with ?level=shards.
type indexStat struct {
	Primaries interface{}              `json:"primaries"`
	Total     interface{}              `json:"total"`
	Shards    map[string][]interface{} `json:"shards"`
}

// serverInfo caches, per server URL, the local node ID and the cluster's
// elected master ID gathered ahead of the stats collection.
type serverInfo struct {
	nodeID   string
	masterID string
}
|
||||
|
||||
// SampleConfig returns the embedded sample configuration for the plugin.
func (*Elasticsearch) SampleConfig() string {
	return sampleConfig
}
|
||||
|
||||
func (e *Elasticsearch) Init() error {
|
||||
// Compile the configured indexes to match for sorting.
|
||||
indexMatchers, err := e.compileIndexMatchers()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
e.indexMatchers = indexMatchers
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Start implements telegraf.ServiceInput; the plugin needs no background
// work, so this is a no-op.
func (*Elasticsearch) Start(telegraf.Accumulator) error {
	return nil
}
|
||||
|
||||
// Gather collects node stats from every configured server and, depending on
// configuration, cluster health, cluster stats, indices/shard stats and
// enrich stats. Each server is queried in its own goroutine; errors are
// reported through the accumulator with any user:password in URLs masked.
func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
	// Lazily create the HTTP client on first gather.
	if e.client == nil {
		client, err := e.createHTTPClient()

		if err != nil {
			return err
		}
		e.client = client
	}

	// When cluster- or indices-level stats are requested, first resolve each
	// server's local node ID and the cluster's master ID so the main pass can
	// decide whether this node is the master.
	if e.ClusterStats || len(e.IndicesInclude) > 0 || len(e.IndicesLevel) > 0 {
		var wgC sync.WaitGroup
		wgC.Add(len(e.Servers))

		e.serverInfo = make(map[string]serverInfo)
		for _, serv := range e.Servers {
			go func(s string, acc telegraf.Accumulator) {
				defer wgC.Done()
				info := serverInfo{}

				var err error

				// Gather node ID
				if info.nodeID, err = e.gatherNodeID(s + "/_nodes/_local/name"); err != nil {
					acc.AddError(errors.New(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@")))
					return
				}

				// get cat/master information here so NodeStats can determine
				// whether this node is the Master
				if info.masterID, err = e.getCatMaster(s + "/_cat/master"); err != nil {
					acc.AddError(errors.New(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@")))
					return
				}

				// serverInfo is written by one goroutine per server; guard it.
				e.serverInfoMutex.Lock()
				e.serverInfo[s] = info
				e.serverInfoMutex.Unlock()
			}(serv, acc)
		}
		wgC.Wait()
	}

	// Main collection pass, one goroutine per server.
	var wg sync.WaitGroup
	wg.Add(len(e.Servers))

	for _, serv := range e.Servers {
		go func(s string, acc telegraf.Accumulator) {
			defer wg.Done()
			url := e.nodeStatsURL(s)

			// Always gather node stats
			if err := e.gatherNodeStats(url, acc); err != nil {
				acc.AddError(errors.New(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@")))
				return
			}

			if e.ClusterHealth {
				url = s + "/_cluster/health"
				if e.ClusterHealthLevel != "" {
					url = url + "?level=" + e.ClusterHealthLevel
				}
				if err := e.gatherClusterHealth(url, acc); err != nil {
					acc.AddError(errors.New(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@")))
					return
				}
			}

			// Cluster stats: gathered from every node unless restricted to the
			// master (or when not running in local mode).
			if e.ClusterStats && (e.serverInfo[s].isMaster() || !e.ClusterStatsOnlyFromMaster || !e.Local) {
				if err := e.gatherClusterStats(s+"/_cluster/stats", acc); err != nil {
					acc.AddError(errors.New(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@")))
					return
				}
			}

			if len(e.IndicesInclude) > 0 && (e.serverInfo[s].isMaster() || !e.ClusterStatsOnlyFromMaster || !e.Local) {
				if e.IndicesLevel != "shards" {
					if err := e.gatherIndicesStats(s+"/"+strings.Join(e.IndicesInclude, ",")+"/_stats", acc); err != nil {
						acc.AddError(errors.New(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@")))
						return
					}
				} else {
					if err := e.gatherIndicesStats(s+"/"+strings.Join(e.IndicesInclude, ",")+"/_stats?level=shards", acc); err != nil {
						acc.AddError(errors.New(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@")))
						return
					}
				}
			}

			if e.EnrichStats {
				if err := e.gatherEnrichStats(s+"/_enrich/_stats", acc); err != nil {
					acc.AddError(errors.New(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@")))
					return
				}
			}
		}(serv, acc)
	}

	wg.Wait()
	return nil
}
|
||||
|
||||
// Stop releases pooled keep-alive connections held by the HTTP client.
func (e *Elasticsearch) Stop() {
	if e.client != nil {
		e.client.CloseIdleConnections()
	}
}
|
||||
|
||||
// createHTTPClient builds the HTTP client from the common client settings.
// The deprecated 'http_timeout' option, when set, overrides both the overall
// and the response-header timeouts for backward compatibility.
func (e *Elasticsearch) createHTTPClient() (*http.Client, error) {
	ctx := context.Background()
	if e.HTTPTimeout != 0 {
		e.HTTPClientConfig.Timeout = e.HTTPTimeout
		e.HTTPClientConfig.ResponseHeaderTimeout = e.HTTPTimeout
	}
	return e.HTTPClientConfig.CreateClient(ctx, e.Log)
}
|
||||
|
||||
func (e *Elasticsearch) nodeStatsURL(baseURL string) string {
|
||||
var url string
|
||||
|
||||
if e.Local {
|
||||
url = baseURL + statsPathLocal
|
||||
} else {
|
||||
url = baseURL + statsPath
|
||||
}
|
||||
|
||||
if len(e.NodeStats) == 0 {
|
||||
return url
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s/%s", url, strings.Join(e.NodeStats, ","))
|
||||
}
|
||||
|
||||
// gatherNodeID queries the local-node name endpoint and returns the node ID
// it reports. The response should contain exactly one node; an empty string
// is returned when none is present.
func (e *Elasticsearch) gatherNodeID(url string) (string, error) {
	nodeStats := &struct {
		ClusterName string               `json:"cluster_name"`
		Nodes       map[string]*nodeStat `json:"nodes"`
	}{}
	if err := e.gatherJSONData(url, nodeStats); err != nil {
		return "", err
	}

	// Only 1 should be returned
	for id := range nodeStats.Nodes {
		return id, nil
	}
	return "", nil
}
|
||||
|
||||
// gatherNodeStats fetches a node-stats endpoint and emits one measurement per
// stats category per node ("elasticsearch_indices", "elasticsearch_jvm", ...),
// tagged with the node's identity, roles and custom attributes.
func (e *Elasticsearch) gatherNodeStats(url string, acc telegraf.Accumulator) error {
	nodeStats := &struct {
		ClusterName string               `json:"cluster_name"`
		Nodes       map[string]*nodeStat `json:"nodes"`
	}{}
	if err := e.gatherJSONData(url, nodeStats); err != nil {
		return err
	}

	for id, n := range nodeStats.Nodes {
		// Sort roles so the node_roles tag value is stable across gathers.
		sort.Strings(n.Roles)
		tags := map[string]string{
			"node_id":      id,
			"node_host":    n.Host,
			"node_name":    n.Name,
			"cluster_name": nodeStats.ClusterName,
			"node_roles":   strings.Join(n.Roles, ","),
		}

		// Custom node attributes become additional tags.
		for k, v := range n.Attributes {
			tags["node_attribute_"+k] = v
		}

		stats := map[string]interface{}{
			"indices":     n.Indices,
			"os":          n.OS,
			"process":     n.Process,
			"jvm":         n.JVM,
			"thread_pool": n.ThreadPool,
			"fs":          n.FS,
			"transport":   n.Transport,
			"http":        n.HTTP,
			"breakers":    n.Breakers,
		}

		now := time.Now()
		for p, s := range stats {
			// if one of the individual node stats is not even in the
			// original result
			if s == nil {
				continue
			}
			f := parsers_json.JSONFlattener{}
			// parse Json, ignoring strings and bools
			err := f.FlattenJSON("", s)
			if err != nil {
				return err
			}
			acc.AddFields("elasticsearch_"+p, f.Fields, tags, now)
		}
	}
	return nil
}
|
||||
|
||||
// gatherClusterHealth fetches /_cluster/health and emits a cluster-level
// measurement plus, when the response includes per-index health, one
// measurement per index. The textual status is additionally mapped to a
// numeric status_code field for alerting.
func (e *Elasticsearch) gatherClusterHealth(url string, acc telegraf.Accumulator) error {
	healthStats := &clusterHealth{}
	if err := e.gatherJSONData(url, healthStats); err != nil {
		return err
	}
	// A single timestamp keeps the cluster and per-index points aligned.
	measurementTime := time.Now()
	clusterFields := map[string]interface{}{
		"active_primary_shards":            healthStats.ActivePrimaryShards,
		"active_shards":                    healthStats.ActiveShards,
		"active_shards_percent_as_number":  healthStats.ActiveShardsPercentAsNumber,
		"delayed_unassigned_shards":        healthStats.DelayedUnassignedShards,
		"initializing_shards":              healthStats.InitializingShards,
		"number_of_data_nodes":             healthStats.NumberOfDataNodes,
		"number_of_in_flight_fetch":        healthStats.NumberOfInFlightFetch,
		"number_of_nodes":                  healthStats.NumberOfNodes,
		"number_of_pending_tasks":          healthStats.NumberOfPendingTasks,
		"relocating_shards":                healthStats.RelocatingShards,
		"status":                           healthStats.Status,
		"status_code":                      mapHealthStatusToCode(healthStats.Status),
		"task_max_waiting_in_queue_millis": healthStats.TaskMaxWaitingInQueueMillis,
		"timed_out":                        healthStats.TimedOut,
		"unassigned_shards":                healthStats.UnassignedShards,
	}
	acc.AddFields(
		"elasticsearch_cluster_health",
		clusterFields,
		map[string]string{"name": healthStats.ClusterName},
		measurementTime,
	)

	// Per-index health (present when cluster_health_level is "indices").
	for name, health := range healthStats.Indices {
		indexFields := map[string]interface{}{
			"active_primary_shards": health.ActivePrimaryShards,
			"active_shards":         health.ActiveShards,
			"initializing_shards":   health.InitializingShards,
			"number_of_replicas":    health.NumberOfReplicas,
			"number_of_shards":      health.NumberOfShards,
			"relocating_shards":     health.RelocatingShards,
			"status":                health.Status,
			"status_code":           mapHealthStatusToCode(health.Status),
			"unassigned_shards":     health.UnassignedShards,
		}
		acc.AddFields(
			"elasticsearch_cluster_health_indices",
			indexFields,
			map[string]string{"index": name, "name": healthStats.ClusterName},
			measurementTime,
		)
	}
	return nil
}
|
||||
|
||||
// gatherEnrichStats fetches /_enrich/_stats and emits one coordinator
// measurement and one cache measurement per node, both tagged with node_id.
func (e *Elasticsearch) gatherEnrichStats(url string, acc telegraf.Accumulator) error {
	enrichStats := &enrichStats{}
	if err := e.gatherJSONData(url, enrichStats); err != nil {
		return err
	}
	measurementTime := time.Now()

	for _, coordinator := range enrichStats.CoordinatorStats {
		coordinatorFields := map[string]interface{}{
			"queue_size":              coordinator.QueueSize,
			"remote_requests_current": coordinator.RemoteRequestsCurrent,
			"remote_requests_total":   coordinator.RemoteRequestsTotal,
			"executed_searches_total": coordinator.ExecutedSearchesTotal,
		}
		acc.AddFields(
			"elasticsearch_enrich_stats_coordinator",
			coordinatorFields,
			map[string]string{"node_id": coordinator.NodeID},
			measurementTime,
		)
	}

	for _, cache := range enrichStats.CacheStats {
		cacheFields := map[string]interface{}{
			"count":     cache.Count,
			"hits":      cache.Hits,
			"misses":    cache.Misses,
			"evictions": cache.Evictions,
		}
		acc.AddFields(
			"elasticsearch_enrich_stats_cache",
			cacheFields,
			map[string]string{"node_id": cache.NodeID},
			measurementTime,
		)
	}

	return nil
}
|
||||
|
||||
// gatherClusterStats fetches /_cluster/stats and emits two measurements,
// "elasticsearch_clusterstats_nodes" and "elasticsearch_clusterstats_indices",
// with the whole JSON sub-document flattened into fields.
func (e *Elasticsearch) gatherClusterStats(url string, acc telegraf.Accumulator) error {
	clusterStats := &clusterStats{}
	if err := e.gatherJSONData(url, clusterStats); err != nil {
		return err
	}
	now := time.Now()
	tags := map[string]string{
		"node_name":    clusterStats.NodeName,
		"cluster_name": clusterStats.ClusterName,
		"status":       clusterStats.Status,
	}

	stats := map[string]interface{}{
		"nodes":   clusterStats.Nodes,
		"indices": clusterStats.Indices,
	}

	for p, s := range stats {
		f := parsers_json.JSONFlattener{}
		// parse json, including bools and strings
		err := f.FullFlattenJSON("", s, true, true)
		if err != nil {
			return err
		}
		acc.AddFields("elasticsearch_clusterstats_"+p, f.Fields, tags, now)
	}

	return nil
}
|
||||
|
||||
// gatherIndicesStats fetches an indices-stats endpoint and emits the global
// _shards totals, the _all aggregate stats, and then per-index stats via
// gatherIndividualIndicesStats.
func (e *Elasticsearch) gatherIndicesStats(url string, acc telegraf.Accumulator) error {
	indicesStats := &struct {
		Shards  map[string]interface{} `json:"_shards"`
		All     map[string]interface{} `json:"_all"`
		Indices map[string]indexStat   `json:"indices"`
	}{}

	if err := e.gatherJSONData(url, indicesStats); err != nil {
		return err
	}
	now := time.Now()

	// Total Shards Stats
	shardsStats := make(map[string]interface{}, len(indicesStats.Shards))
	for k, v := range indicesStats.Shards {
		shardsStats[k] = v
	}
	acc.AddFields("elasticsearch_indices_stats_shards_total", shardsStats, make(map[string]string), now)

	// All Stats: one measurement per top-level group (primaries/total),
	// tagged index_name="_all".
	for m, s := range indicesStats.All {
		// parse json, including strings and bools
		jsonParser := parsers_json.JSONFlattener{}
		err := jsonParser.FullFlattenJSON("_", s, true, true)
		if err != nil {
			return err
		}
		acc.AddFields("elasticsearch_indices_stats_"+m, jsonParser.Fields, map[string]string{"index_name": "_all"}, now)
	}

	// Gather stats for each index.
	err := e.gatherIndividualIndicesStats(indicesStats.Indices, now, acc)

	return err
}
|
||||
|
||||
// gatherIndividualIndicesStats gathers stats for each returned index. Indices
// are first bucketed by the configured include pattern they match; when
// num_most_recent_indices is set, only the lexicographically-latest N indices
// of each bucket are gathered (most recent, for date-stamped index names).
func (e *Elasticsearch) gatherIndividualIndicesStats(indices map[string]indexStat, now time.Time, acc telegraf.Accumulator) error {
	// Sort indices into buckets based on their configured prefix, if any matches.
	categorizedIndexNames := e.categorizeIndices(indices)
	for _, matchingIndices := range categorizedIndexNames {
		// Establish the number of each category of indices to use. User can configure to use only the latest 'X' amount.
		indicesCount := len(matchingIndices)
		indicesToTrackCount := indicesCount

		// Sort the indices if configured to do so.
		if e.NumMostRecentIndices > 0 {
			if e.NumMostRecentIndices < indicesToTrackCount {
				indicesToTrackCount = e.NumMostRecentIndices
			}
			sort.Strings(matchingIndices)
		}

		// Gather only the number of indexes that have been configured, in descending order (most recent, if date-stamped).
		for i := indicesCount - 1; i >= indicesCount-indicesToTrackCount; i-- {
			indexName := matchingIndices[i]

			err := e.gatherSingleIndexStats(indexName, indices[indexName], now, acc)
			if err != nil {
				return err
			}
		}
	}

	return nil
}
|
||||
|
||||
func (e *Elasticsearch) categorizeIndices(indices map[string]indexStat) map[string][]string {
|
||||
categorizedIndexNames := make(map[string][]string, len(indices))
|
||||
|
||||
// If all indices are configured to be gathered, bucket them all together.
|
||||
if len(e.IndicesInclude) == 0 || e.IndicesInclude[0] == "_all" {
|
||||
for indexName := range indices {
|
||||
categorizedIndexNames["_all"] = append(categorizedIndexNames["_all"], indexName)
|
||||
}
|
||||
|
||||
return categorizedIndexNames
|
||||
}
|
||||
|
||||
// Bucket each returned index with its associated configured index (if any match).
|
||||
for indexName := range indices {
|
||||
match := indexName
|
||||
for name, matcher := range e.indexMatchers {
|
||||
// If a configured index matches one of the returned indexes, mark it as a match.
|
||||
if matcher.Match(match) {
|
||||
match = name
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Bucket all matching indices together for sorting.
|
||||
categorizedIndexNames[match] = append(categorizedIndexNames[match], indexName)
|
||||
}
|
||||
|
||||
return categorizedIndexNames
|
||||
}
|
||||
|
||||
// gatherSingleIndexStats emits the primaries/total stats for one index and,
// when indices_level is "shards", one measurement per shard copy with the
// routing information lifted into tags (shard number, node, primary/replica).
func (e *Elasticsearch) gatherSingleIndexStats(name string, index indexStat, now time.Time, acc telegraf.Accumulator) error {
	indexTag := map[string]string{"index_name": name}
	stats := map[string]interface{}{
		"primaries": index.Primaries,
		"total":     index.Total,
	}
	for m, s := range stats {
		f := parsers_json.JSONFlattener{}
		// parse Json, getting strings and bools
		err := f.FullFlattenJSON("", s, true, true)
		if err != nil {
			return err
		}
		acc.AddFields("elasticsearch_indices_stats_"+m, f.Fields, indexTag, now)
	}

	if e.IndicesLevel == "shards" {
		for shardNumber, shards := range index.Shards {
			for _, shard := range shards {
				// Get Shard Stats
				flattened := parsers_json.JSONFlattener{}
				err := flattened.FullFlattenJSON("", shard, true, true)
				if err != nil {
					return err
				}

				// determine shard tag and primary/replica designation
				shardType := "replica"
				routingPrimary, _ := flattened.Fields["routing_primary"].(bool)
				if routingPrimary {
					shardType = "primary"
				}
				// routing_primary is reflected in the "type" tag; drop the field.
				delete(flattened.Fields, "routing_primary")

				// Replace the textual routing state with its numeric code.
				routingState, ok := flattened.Fields["routing_state"].(string)
				if ok {
					flattened.Fields["routing_state"] = mapShardStatusToCode(routingState)
				}

				routingNode, _ := flattened.Fields["routing_node"].(string)
				shardTags := map[string]string{
					"index_name": name,
					"node_id":    routingNode,
					"shard_name": shardNumber,
					"type":       shardType,
				}

				// Strip remaining string/bool fields; shard measurements keep
				// numeric fields only. (Deleting while ranging is safe in Go.)
				for key, field := range flattened.Fields {
					switch field.(type) {
					case string, bool:
						delete(flattened.Fields, key)
					}
				}

				acc.AddFields("elasticsearch_indices_stats_shards",
					flattened.Fields,
					shardTags,
					now)
			}
		}
	}

	return nil
}
|
||||
|
||||
// getCatMaster queries the /_cat/master endpoint and returns the elected
// master's node ID (the first whitespace-delimited token of the plain-text
// response).
func (e *Elasticsearch) getCatMaster(url string) (string, error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return "", err
	}

	if e.Username != "" || e.Password != "" {
		req.SetBasicAuth(e.Username, e.Password)
	}

	// Attach any user-configured extra headers.
	for key, value := range e.HTTPHeaders {
		req.Header.Add(key, value)
	}

	r, err := e.client.Do(req)
	if err != nil {
		return "", err
	}
	defer r.Body.Close()
	if r.StatusCode != http.StatusOK {
		// NOTE: we are not going to read/discard r.Body under the assumption we'd prefer
		// to let the underlying transport close the connection and re-establish a new one for
		// future calls.
		return "", fmt.Errorf(
			"elasticsearch: Unable to retrieve master node information. API responded with status-code %d, expected %d",
			r.StatusCode,
			http.StatusOK,
		)
	}
	response, err := io.ReadAll(r.Body)

	if err != nil {
		return "", err
	}

	masterID := strings.Split(string(response), " ")[0]

	return masterID, nil
}
|
||||
|
||||
func (e *Elasticsearch) gatherJSONData(url string, v interface{}) error {
|
||||
req, err := http.NewRequest("GET", url, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if e.Username != "" || e.Password != "" {
|
||||
req.SetBasicAuth(e.Username, e.Password)
|
||||
}
|
||||
|
||||
for key, value := range e.HTTPHeaders {
|
||||
req.Header.Add(key, value)
|
||||
}
|
||||
|
||||
r, err := e.client.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer r.Body.Close()
|
||||
if r.StatusCode != http.StatusOK {
|
||||
// NOTE: we are not going to read/discard r.Body under the assumption we'd prefer
|
||||
// to let the underlying transport close the connection and re-establish a new one for
|
||||
// future calls.
|
||||
return fmt.Errorf("elasticsearch: API responded with status-code %d, expected %d",
|
||||
r.StatusCode, http.StatusOK)
|
||||
}
|
||||
|
||||
return json.NewDecoder(r.Body).Decode(v)
|
||||
}
|
||||
|
||||
func (e *Elasticsearch) compileIndexMatchers() (map[string]filter.Filter, error) {
|
||||
var err error
|
||||
indexMatchers := make(map[string]filter.Filter, len(e.IndicesInclude))
|
||||
|
||||
// Compile each configured index into a glob matcher.
|
||||
for _, configuredIndex := range e.IndicesInclude {
|
||||
if _, exists := indexMatchers[configuredIndex]; !exists {
|
||||
indexMatchers[configuredIndex], err = filter.Compile([]string{configuredIndex})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return indexMatchers, nil
|
||||
}
|
||||
|
||||
// isMaster reports whether this server's local node is the cluster's elected
// master (its node ID equals the master ID from /_cat/master).
func (i serverInfo) isMaster() bool {
	return i.nodeID == i.masterID
}
|
||||
|
||||
// mapHealthStatusToCode converts the textual cluster health status into a
// numeric code suitable for graphing/alerting: green=1, yellow=2, red=3,
// anything else 0. Matching is case-insensitive.
func mapHealthStatusToCode(s string) int {
	codes := map[string]int{
		"green":  1,
		"yellow": 2,
		"red":    3,
	}
	return codes[strings.ToLower(s)]
}
|
||||
|
||||
// mapShardStatusToCode converts the textual shard routing state into a
// numeric code: UNASSIGNED=1, INITIALIZING=2, STARTED=3, RELOCATING=4,
// anything else 0. Matching is case-insensitive.
func mapShardStatusToCode(s string) int {
	codes := map[string]int{
		"UNASSIGNED":   1,
		"INITIALIZING": 2,
		"STARTED":      3,
		"RELOCATING":   4,
	}
	return codes[strings.ToUpper(s)]
}
|
||||
|
||||
// newElasticsearch creates the plugin with its defaults: cluster stats only
// from the elected master, indices-level cluster health, and 5-second HTTP
// timeouts.
func newElasticsearch() *Elasticsearch {
	return &Elasticsearch{
		ClusterStatsOnlyFromMaster: true,
		ClusterHealthLevel:         "indices",
		HTTPClientConfig: common_http.HTTPClientConfig{
			ResponseHeaderTimeout: config.Duration(5 * time.Second),
			Timeout:               config.Duration(5 * time.Second),
		},
	}
}
|
||||
|
||||
// init registers the plugin with Telegraf's input-plugin registry.
func init() {
	inputs.Add("elasticsearch", func() telegraf.Input {
		return newElasticsearch()
	})
}
|
373
plugins/inputs/elasticsearch/elasticsearch_test.go
Normal file
373
plugins/inputs/elasticsearch/elasticsearch_test.go
Normal file
|
@ -0,0 +1,373 @@
|
|||
package elasticsearch
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
)
|
||||
|
||||
// defaultTags returns the tag set expected on node-stats measurements built
// from the canned nodeStatsResponse fixture.
func defaultTags() map[string]string {
	return map[string]string{
		"cluster_name":          "es-testcluster",
		"node_attribute_master": "true",
		"node_id":               "SDFsfSDFsdfFSDSDfSFDSDF",
		"node_name":             "test.host.com",
		"node_host":             "test",
		"node_roles":            "data,ingest,master",
	}
}

// defaultServerInfo returns server info in which the local node is NOT the
// elected master (nodeID differs from masterID).
func defaultServerInfo() serverInfo {
	return serverInfo{nodeID: "", masterID: "SDFsfSDFsdfFSDSDfSFDSDF"}
}

// transportMock is an http.RoundTripper returning a fixed status code and
// body for every request, so the plugin's HTTP calls can be tested offline.
type transportMock struct {
	statusCode int
	body       string
}

// newTransportMock builds a transportMock that always answers 200 OK with the
// given body.
func newTransportMock(body string) http.RoundTripper {
	return &transportMock{
		statusCode: http.StatusOK,
		body:       body,
	}
}

// RoundTrip satisfies http.RoundTripper by returning the canned JSON response.
func (t *transportMock) RoundTrip(r *http.Request) (*http.Response, error) {
	res := &http.Response{
		Header:     make(http.Header),
		Request:    r,
		StatusCode: t.statusCode,
	}
	res.Header.Set("Content-Type", "application/json")
	res.Body = io.NopCloser(strings.NewReader(t.body))
	return res, nil
}

// checkNodeStatsResult asserts that every node-stats measurement family
// produced from nodeStatsResponse carries the expected fields and tags.
func checkNodeStatsResult(t *testing.T, acc *testutil.Accumulator) {
	tags := defaultTags()
	acc.AssertContainsTaggedFields(t, "elasticsearch_indices", nodestatsIndicesExpected, tags)
	acc.AssertContainsTaggedFields(t, "elasticsearch_os", nodestatsOsExpected, tags)
	acc.AssertContainsTaggedFields(t, "elasticsearch_process", nodestatsProcessExpected, tags)
	acc.AssertContainsTaggedFields(t, "elasticsearch_jvm", nodestatsJvmExpected, tags)
	acc.AssertContainsTaggedFields(t, "elasticsearch_thread_pool", nodestatsThreadPoolExpected, tags)
	acc.AssertContainsTaggedFields(t, "elasticsearch_fs", nodestatsFsExpected, tags)
	acc.AssertContainsTaggedFields(t, "elasticsearch_transport", nodestatsTransportExpected, tags)
	acc.AssertContainsTaggedFields(t, "elasticsearch_http", nodestatsHTTPExpected, tags)
	acc.AssertContainsTaggedFields(t, "elasticsearch_breakers", nodestatsBreakersExpected, tags)
}
|
||||
|
||||
// TestGather verifies a full Gather pass against the canned node-stats
// response emits all expected measurements.
func TestGather(t *testing.T) {
	es := newElasticsearchWithClient()
	es.Servers = []string{"http://example.com:9200"}
	es.client.Transport = newTransportMock(nodeStatsResponse)
	es.serverInfo = make(map[string]serverInfo)
	es.serverInfo["http://example.com:9200"] = defaultServerInfo()

	var acc testutil.Accumulator
	require.NoError(t, acc.GatherError(es.Gather))
	require.False(t, es.serverInfo[es.Servers[0]].isMaster(), "IsMaster set incorrectly")
	checkNodeStatsResult(t, &acc)
}

// TestGatherIndividualStats verifies that restricting node_stats to a subset
// ("jvm", "process") emits only those measurement families.
func TestGatherIndividualStats(t *testing.T) {
	es := newElasticsearchWithClient()
	es.Servers = []string{"http://example.com:9200"}
	es.NodeStats = []string{"jvm", "process"}
	es.client.Transport = newTransportMock(nodeStatsResponseJVMProcess)
	es.serverInfo = make(map[string]serverInfo)
	es.serverInfo["http://example.com:9200"] = defaultServerInfo()

	var acc testutil.Accumulator
	require.NoError(t, acc.GatherError(es.Gather))
	require.False(t, es.serverInfo[es.Servers[0]].isMaster(), "IsMaster set incorrectly")

	tags := defaultTags()
	acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices", nodestatsIndicesExpected, tags)
	acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_os", nodestatsOsExpected, tags)
	acc.AssertContainsTaggedFields(t, "elasticsearch_process", nodestatsProcessExpected, tags)
	acc.AssertContainsTaggedFields(t, "elasticsearch_jvm", nodestatsJvmExpected, tags)
	acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_thread_pool", nodestatsThreadPoolExpected, tags)
	acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_fs", nodestatsFsExpected, tags)
	acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_transport", nodestatsTransportExpected, tags)
	acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_http", nodestatsHTTPExpected, tags)
	acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_breakers", nodestatsBreakersExpected, tags)
}

// TestGatherEnrichStats verifies the enrich-stats path produces the expected
// number of metrics from the canned enrich response.
func TestGatherEnrichStats(t *testing.T) {
	es := newElasticsearchWithClient()
	es.Servers = []string{"http://example.com:9200"}
	es.EnrichStats = true
	es.client.Transport = newTransportMock(enrichStatsResponse)
	es.serverInfo = make(map[string]serverInfo)
	es.serverInfo["http://example.com:9200"] = defaultServerInfo()

	var acc testutil.Accumulator
	require.NoError(t, acc.GatherError(es.Gather))
	require.False(t, es.serverInfo[es.Servers[0]].isMaster(), "IsMaster set incorrectly")

	metrics := acc.GetTelegrafMetrics()
	require.Len(t, metrics, 8)
}

// TestGatherNodeStats exercises gatherNodeStats directly (the URL is unused
// because the mocked transport answers every request).
func TestGatherNodeStats(t *testing.T) {
	es := newElasticsearchWithClient()
	es.Servers = []string{"http://example.com:9200"}
	es.client.Transport = newTransportMock(nodeStatsResponse)
	es.serverInfo = make(map[string]serverInfo)
	es.serverInfo["http://example.com:9200"] = defaultServerInfo()

	var acc testutil.Accumulator
	require.NoError(t, es.gatherNodeStats("junk", &acc))
	require.False(t, es.serverInfo[es.Servers[0]].isMaster(), "IsMaster set incorrectly")
	checkNodeStatsResult(t, &acc)
}

// TestGatherClusterHealthEmptyClusterHealth verifies that with no
// cluster_health_level set, only the cluster-level measurement is produced
// and no per-index measurements appear.
func TestGatherClusterHealthEmptyClusterHealth(t *testing.T) {
	es := newElasticsearchWithClient()
	es.Servers = []string{"http://example.com:9200"}
	es.ClusterHealth = true
	es.ClusterHealthLevel = ""
	es.client.Transport = newTransportMock(clusterHealthResponse)
	es.serverInfo = make(map[string]serverInfo)
	es.serverInfo["http://example.com:9200"] = defaultServerInfo()

	var acc testutil.Accumulator
	require.NoError(t, es.gatherClusterHealth("junk", &acc))
	require.False(t, es.serverInfo[es.Servers[0]].isMaster(), "IsMaster set incorrectly")

	acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health",
		clusterHealthExpected,
		map[string]string{"name": "elasticsearch_telegraf"})

	acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_cluster_health_indices",
		v1IndexExpected,
		map[string]string{"index": "v1"})

	acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_cluster_health_indices",
		v2IndexExpected,
		map[string]string{"index": "v2"})
}
|
||||
|
||||
func TestGatherClusterHealthSpecificClusterHealth(t *testing.T) {
|
||||
es := newElasticsearchWithClient()
|
||||
es.Servers = []string{"http://example.com:9200"}
|
||||
es.ClusterHealth = true
|
||||
es.ClusterHealthLevel = "cluster"
|
||||
es.client.Transport = newTransportMock(clusterHealthResponse)
|
||||
es.serverInfo = make(map[string]serverInfo)
|
||||
es.serverInfo["http://example.com:9200"] = defaultServerInfo()
|
||||
|
||||
var acc testutil.Accumulator
|
||||
require.NoError(t, es.gatherClusterHealth("junk", &acc))
|
||||
require.False(t, es.serverInfo[es.Servers[0]].isMaster(), "IsMaster set incorrectly")
|
||||
|
||||
acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health",
|
||||
clusterHealthExpected,
|
||||
map[string]string{"name": "elasticsearch_telegraf"})
|
||||
|
||||
acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_cluster_health_indices",
|
||||
v1IndexExpected,
|
||||
map[string]string{"index": "v1"})
|
||||
|
||||
acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_cluster_health_indices",
|
||||
v2IndexExpected,
|
||||
map[string]string{"index": "v2"})
|
||||
}
|
||||
|
||||
func TestGatherClusterHealthAlsoIndicesHealth(t *testing.T) {
|
||||
es := newElasticsearchWithClient()
|
||||
es.Servers = []string{"http://example.com:9200"}
|
||||
es.ClusterHealth = true
|
||||
es.ClusterHealthLevel = "indices"
|
||||
es.client.Transport = newTransportMock(clusterHealthResponseWithIndices)
|
||||
es.serverInfo = make(map[string]serverInfo)
|
||||
es.serverInfo["http://example.com:9200"] = defaultServerInfo()
|
||||
|
||||
var acc testutil.Accumulator
|
||||
require.NoError(t, es.gatherClusterHealth("junk", &acc))
|
||||
require.False(t, es.serverInfo[es.Servers[0]].isMaster(), "IsMaster set incorrectly")
|
||||
|
||||
acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health",
|
||||
clusterHealthExpected,
|
||||
map[string]string{"name": "elasticsearch_telegraf"})
|
||||
|
||||
acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health_indices",
|
||||
v1IndexExpected,
|
||||
map[string]string{"index": "v1", "name": "elasticsearch_telegraf"})
|
||||
|
||||
acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health_indices",
|
||||
v2IndexExpected,
|
||||
map[string]string{"index": "v2", "name": "elasticsearch_telegraf"})
|
||||
}
|
||||
|
||||
func TestGatherClusterStatsMaster(t *testing.T) {
|
||||
// This needs multiple steps to replicate the multiple calls internally.
|
||||
es := newElasticsearchWithClient()
|
||||
es.ClusterStats = true
|
||||
es.Servers = []string{"http://example.com:9200"}
|
||||
es.serverInfo = make(map[string]serverInfo)
|
||||
info := serverInfo{nodeID: "SDFsfSDFsdfFSDSDfSFDSDF", masterID: ""}
|
||||
|
||||
// first get catMaster
|
||||
es.client.Transport = newTransportMock(IsMasterResult)
|
||||
masterID, err := es.getCatMaster("junk")
|
||||
require.NoError(t, err)
|
||||
info.masterID = masterID
|
||||
es.serverInfo["http://example.com:9200"] = info
|
||||
|
||||
isMasterResultTokens := strings.Split(IsMasterResult, " ")
|
||||
require.Equal(t, masterID, isMasterResultTokens[0], "catmaster is incorrect")
|
||||
|
||||
// now get node status, which determines whether we're master
|
||||
var acc testutil.Accumulator
|
||||
es.Local = true
|
||||
es.client.Transport = newTransportMock(nodeStatsResponse)
|
||||
require.NoError(t, es.gatherNodeStats("junk", &acc))
|
||||
require.True(t, es.serverInfo[es.Servers[0]].isMaster(), "IsMaster set incorrectly")
|
||||
checkNodeStatsResult(t, &acc)
|
||||
|
||||
// now test the clusterstats method
|
||||
es.client.Transport = newTransportMock(clusterStatsResponse)
|
||||
require.NoError(t, es.gatherClusterStats("junk", &acc))
|
||||
|
||||
tags := map[string]string{
|
||||
"cluster_name": "es-testcluster",
|
||||
"node_name": "test.host.com",
|
||||
"status": "red",
|
||||
}
|
||||
|
||||
acc.AssertContainsTaggedFields(t, "elasticsearch_clusterstats_nodes", clusterstatsNodesExpected, tags)
|
||||
acc.AssertContainsTaggedFields(t, "elasticsearch_clusterstats_indices", clusterstatsIndicesExpected, tags)
|
||||
}
|
||||
|
||||
func TestGatherClusterStatsNonMaster(t *testing.T) {
|
||||
// This needs multiple steps to replicate the multiple calls internally.
|
||||
es := newElasticsearchWithClient()
|
||||
es.ClusterStats = true
|
||||
es.Servers = []string{"http://example.com:9200"}
|
||||
es.serverInfo = make(map[string]serverInfo)
|
||||
es.serverInfo["http://example.com:9200"] = serverInfo{nodeID: "SDFsfSDFsdfFSDSDfSFDSDF", masterID: ""}
|
||||
|
||||
// first get catMaster
|
||||
es.client.Transport = newTransportMock(IsNotMasterResult)
|
||||
masterID, err := es.getCatMaster("junk")
|
||||
require.NoError(t, err)
|
||||
|
||||
isNotMasterResultTokens := strings.Split(IsNotMasterResult, " ")
|
||||
require.Equal(t, masterID, isNotMasterResultTokens[0], "catmaster is incorrect")
|
||||
|
||||
// now get node status, which determines whether we're master
|
||||
var acc testutil.Accumulator
|
||||
es.Local = true
|
||||
es.client.Transport = newTransportMock(nodeStatsResponse)
|
||||
require.NoError(t, es.gatherNodeStats("junk", &acc))
|
||||
|
||||
// ensure flag is clear so Cluster Stats would not be done
|
||||
require.False(t, es.serverInfo[es.Servers[0]].isMaster(), "IsMaster set incorrectly")
|
||||
checkNodeStatsResult(t, &acc)
|
||||
}
|
||||
|
||||
func TestGatherClusterIndicesStats(t *testing.T) {
|
||||
es := newElasticsearchWithClient()
|
||||
es.IndicesInclude = []string{"_all"}
|
||||
es.Servers = []string{"http://example.com:9200"}
|
||||
es.client.Transport = newTransportMock(clusterIndicesResponse)
|
||||
es.serverInfo = make(map[string]serverInfo)
|
||||
es.serverInfo["http://example.com:9200"] = defaultServerInfo()
|
||||
|
||||
var acc testutil.Accumulator
|
||||
require.NoError(t, es.gatherIndicesStats("junk", &acc))
|
||||
|
||||
acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_primaries",
|
||||
clusterIndicesExpected,
|
||||
map[string]string{"index_name": "twitter"})
|
||||
}
|
||||
|
||||
func TestGatherDateStampedIndicesStats(t *testing.T) {
|
||||
es := newElasticsearchWithClient()
|
||||
es.IndicesInclude = []string{"twitter*", "influx*", "penguins"}
|
||||
es.NumMostRecentIndices = 2
|
||||
es.Servers = []string{"http://example.com:9200"}
|
||||
es.client.Transport = newTransportMock(dateStampedIndicesResponse)
|
||||
es.serverInfo = make(map[string]serverInfo)
|
||||
es.serverInfo["http://example.com:9200"] = defaultServerInfo()
|
||||
require.NoError(t, es.Init())
|
||||
|
||||
var acc testutil.Accumulator
|
||||
require.NoError(t, es.gatherIndicesStats(es.Servers[0]+"/"+strings.Join(es.IndicesInclude, ",")+"/_stats", &acc))
|
||||
|
||||
// includes 2 most recent indices for "twitter", only expect the most recent two.
|
||||
acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_primaries",
|
||||
clusterIndicesExpected,
|
||||
map[string]string{"index_name": "twitter_2020_08_02"})
|
||||
acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_primaries",
|
||||
clusterIndicesExpected,
|
||||
map[string]string{"index_name": "twitter_2020_08_01"})
|
||||
acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices_stats_primaries",
|
||||
clusterIndicesExpected,
|
||||
map[string]string{"index_name": "twitter_2020_07_31"})
|
||||
|
||||
// includes 2 most recent indices for "influx", only expect the most recent two.
|
||||
acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_primaries",
|
||||
clusterIndicesExpected,
|
||||
map[string]string{"index_name": "influx2021.01.02"})
|
||||
acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_primaries",
|
||||
clusterIndicesExpected,
|
||||
map[string]string{"index_name": "influx2021.01.01"})
|
||||
acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices_stats_primaries",
|
||||
clusterIndicesExpected,
|
||||
map[string]string{"index_name": "influx2020.12.31"})
|
||||
|
||||
// not configured to sort the 'penguins' index, but ensure it is also included.
|
||||
acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_primaries",
|
||||
clusterIndicesExpected,
|
||||
map[string]string{"index_name": "penguins"})
|
||||
}
|
||||
|
||||
func TestGatherClusterIndiceShardsStats(t *testing.T) {
|
||||
es := newElasticsearchWithClient()
|
||||
es.IndicesLevel = "shards"
|
||||
es.Servers = []string{"http://example.com:9200"}
|
||||
es.client.Transport = newTransportMock(clusterIndicesShardsResponse)
|
||||
es.serverInfo = make(map[string]serverInfo)
|
||||
es.serverInfo["http://example.com:9200"] = defaultServerInfo()
|
||||
|
||||
var acc testutil.Accumulator
|
||||
require.NoError(t, es.gatherIndicesStats("junk", &acc))
|
||||
|
||||
acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_primaries",
|
||||
clusterIndicesExpected,
|
||||
map[string]string{"index_name": "twitter"})
|
||||
|
||||
primaryTags := map[string]string{
|
||||
"index_name": "twitter",
|
||||
"node_id": "oqvR8I1dTpONvwRM30etww",
|
||||
"shard_name": "0",
|
||||
"type": "primary",
|
||||
}
|
||||
|
||||
acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_shards",
|
||||
clusterIndicesPrimaryShardsExpected,
|
||||
primaryTags)
|
||||
|
||||
replicaTags := map[string]string{
|
||||
"index_name": "twitter",
|
||||
"node_id": "oqvR8I1dTpONvwRM30etww",
|
||||
"shard_name": "1",
|
||||
"type": "replica",
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_shards",
|
||||
clusterIndicesReplicaShardsExpected,
|
||||
replicaTags)
|
||||
}
|
||||
|
||||
func newElasticsearchWithClient() *Elasticsearch {
|
||||
es := newElasticsearch()
|
||||
es.client = &http.Client{}
|
||||
return es
|
||||
}
|
73
plugins/inputs/elasticsearch/sample.conf
Normal file
73
plugins/inputs/elasticsearch/sample.conf
Normal file
|
@ -0,0 +1,73 @@
|
|||
# Read stats from one or more Elasticsearch servers or clusters
[[inputs.elasticsearch]]
## specify a list of one or more Elasticsearch servers
## you can add username and password to your url to use basic authentication:
## servers = ["http://user:pass@localhost:9200"]
servers = ["http://localhost:9200"]

## HTTP headers to send with each request
# headers = { "X-Custom-Header" = "Custom" }

## When local is true (the default), the node will read only its own stats.
## Set local to false when you want to read the node stats from all nodes
## of the cluster.
local = true

## Set cluster_health to true when you want to obtain cluster health stats
cluster_health = false

## Adjust cluster_health_level when you want to obtain detailed health stats
## The options are
##  - indices (default)
##  - cluster
# cluster_health_level = "indices"

## Set cluster_stats to true when you want to obtain cluster stats.
cluster_stats = false

## Only gather cluster_stats from the master node.
## For this to work, local must be set to true.
cluster_stats_only_from_master = true

## Gather stats from the enrich API
# enrich_stats = false

## Indices to collect; can be one or more indices names or _all
## Use of wildcards is allowed. Use a wildcard at the end to retrieve index
## names that end with a changing value, like a date.
indices_include = ["_all"]

## One of "shards", "cluster", "indices"
## Currently only "shards" is implemented
indices_level = "shards"

## node_stats is a list of sub-stats that you want to have gathered.
## Valid options are "indices", "os", "process", "jvm", "thread_pool",
## "fs", "transport", "http", "breaker". Per default, all stats are gathered.
# node_stats = ["jvm", "http"]

## HTTP Basic Authentication username and password.
# username = ""
# password = ""

## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false

## If 'use_system_proxy' is set to true, Telegraf will check env vars such as
## HTTP_PROXY, HTTPS_PROXY, and NO_PROXY (or their lowercase counterparts).
## If 'use_system_proxy' is set to false (default) and 'http_proxy_url' is
## provided, Telegraf will use the specified URL as HTTP proxy.
# use_system_proxy = false
# http_proxy_url = "http://localhost:8888"

## Sets the number of most recent indices to return for indices that are
## configured with a date-stamped suffix. Each 'indices_include' entry
## ending with a wildcard (*) or glob matching pattern will group together
## all indices that match it, and sort them by the date or number after
## the wildcard. Metrics then are gathered for only the
## 'num_most_recent_indices' amount of most recent indices.
# num_most_recent_indices = 0
|
5916
plugins/inputs/elasticsearch/testdata_test.go
Normal file
5916
plugins/inputs/elasticsearch/testdata_test.go
Normal file
File diff suppressed because it is too large
Load diff
Loading…
Add table
Add a link
Reference in a new issue