Adding upstream version 1.34.4.
Signed-off-by: Daniel Baumann <daniel@debian.org>
This commit is contained in:
parent
e393c3af3f
commit
4978089aab
4963 changed files with 677545 additions and 0 deletions
346
plugins/inputs/mongodb/README.md
Normal file
346
plugins/inputs/mongodb/README.md
Normal file
|
@ -0,0 +1,346 @@
|
|||
# MongoDB Input Plugin
|
||||
|
||||
This plugin collects metrics about [MongoDB][mongodb] server instances by
|
||||
running database commands.
|
||||
|
||||
> [!NOTE]
|
||||
> This plugin supports all versions marked as supported in the
|
||||
> [MongoDB Software Lifecycle Schedules][lifecycles].
|
||||
|
||||
⭐ Telegraf v0.1.5
|
||||
🏷️ datastore
|
||||
💻 all
|
||||
|
||||
[mongodb]: https://www.mongodb.com
|
||||
[lifecycles]: https://www.mongodb.com/support-policy/lifecycles
|
||||
|
||||
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
|
||||
|
||||
In addition to the plugin-specific configuration settings, plugins support
|
||||
additional global and plugin configuration settings. These settings are used to
|
||||
modify metrics, tags, and field or create aliases and configure ordering, etc.
|
||||
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
|
||||
|
||||
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
|
||||
|
||||
## Configuration
|
||||
|
||||
```toml @sample.conf
|
||||
# Read metrics from one or many MongoDB servers
|
||||
[[inputs.mongodb]]
|
||||
## An array of URLs of the form:
|
||||
## "mongodb://" [user ":" pass "@"] host [ ":" port]
|
||||
## For example:
|
||||
## mongodb://user:auth_key@10.10.3.30:27017,
|
||||
## mongodb://10.10.3.33:18832,
|
||||
##
|
||||
## If connecting to a cluster, users must include the "?connect=direct" in
|
||||
## the URL to ensure that the connection goes directly to the specified node
|
||||
## and not have all connections passed to the master node.
|
||||
servers = ["mongodb://127.0.0.1:27017/?connect=direct"]
|
||||
|
||||
## When true, collect cluster status.
|
||||
## Note that the query that counts jumbo chunks triggers a COLLSCAN, which
|
||||
## may have an impact on performance.
|
||||
# gather_cluster_status = true
|
||||
|
||||
## When true, collect per database stats
|
||||
# gather_perdb_stats = false
|
||||
|
||||
## When true, collect per collection stats
|
||||
# gather_col_stats = false
|
||||
|
||||
## When true, collect usage statistics for each collection
|
||||
## (insert, update, queries, remove, getmore, commands etc...).
|
||||
# gather_top_stat = false
|
||||
|
||||
## List of db where collections stats are collected
|
||||
## If empty, all db are concerned
|
||||
# col_stats_dbs = ["local"]
|
||||
|
||||
## Optional TLS Config
|
||||
# tls_ca = "/etc/telegraf/ca.pem"
|
||||
# tls_cert = "/etc/telegraf/cert.pem"
|
||||
# tls_key = "/etc/telegraf/key.pem"
|
||||
## Use TLS but skip chain & host verification
|
||||
# insecure_skip_verify = false
|
||||
|
||||
## Specifies plugin behavior regarding disconnected servers
|
||||
## Available choices :
|
||||
## - error: telegraf will return an error on startup if one the servers is unreachable
|
||||
## - skip: telegraf will skip unreachable servers on both startup and gather
|
||||
# disconnected_servers_behavior = "error"
|
||||
```
|
||||
|
||||
### Permissions
|
||||
|
||||
If your MongoDB instance has access control enabled you will need to connect
|
||||
as a user with sufficient rights.
|
||||
|
||||
With MongoDB 3.4 and higher, the `clusterMonitor` role can be used. In
|
||||
version 3.2 you may also need these additional permissions:
|
||||
|
||||
```shell
|
||||
> db.grantRolesToUser("user", [{role: "read", actions: "find", db: "local"}])
|
||||
```
|
||||
|
||||
If the user is missing required privileges you may see an error in the
|
||||
Telegraf logs similar to:
|
||||
|
||||
```shell
|
||||
Error in input [mongodb]: not authorized on admin to execute command { serverStatus: 1, recordStats: 0 }
|
||||
```
|
||||
|
||||
Some permission related errors are logged at debug level, you can check these
|
||||
messages by setting `debug = true` in the agent section of the configuration or
|
||||
by running Telegraf with the `--debug` argument.
|
||||
|
||||
## Metrics
|
||||
|
||||
- mongodb
|
||||
- tags:
|
||||
- hostname
|
||||
- node_type
|
||||
- rs_name
|
||||
- fields:
|
||||
- active_reads (integer)
|
||||
- active_writes (integer)
|
||||
- aggregate_command_failed (integer)
|
||||
- aggregate_command_total (integer)
|
||||
- assert_msg (integer)
|
||||
- assert_regular (integer)
|
||||
- assert_rollovers (integer)
|
||||
- assert_user (integer)
|
||||
- assert_warning (integer)
|
||||
- available_reads (integer)
|
||||
- available_writes (integer)
|
||||
- commands (integer)
|
||||
- connections_available (integer)
|
||||
- connections_current (integer)
|
||||
- connections_total_created (integer)
|
||||
- count_command_failed (integer)
|
||||
- count_command_total (integer)
|
||||
- cursor_no_timeout_count (integer)
|
||||
- cursor_pinned_count (integer)
|
||||
- cursor_timed_out_count (integer)
|
||||
- cursor_total_count (integer)
|
||||
- delete_command_failed (integer)
|
||||
- delete_command_total (integer)
|
||||
- deletes (integer)
|
||||
- distinct_command_failed (integer)
|
||||
- distinct_command_total (integer)
|
||||
- document_deleted (integer)
|
||||
- document_inserted (integer)
|
||||
- document_returned (integer)
|
||||
- document_updated (integer)
|
||||
- find_and_modify_command_failed (integer)
|
||||
- find_and_modify_command_total (integer)
|
||||
- find_command_failed (integer)
|
||||
- find_command_total (integer)
|
||||
- flushes (integer)
|
||||
- flushes_total_time_ns (integer)
|
||||
- get_more_command_failed (integer)
|
||||
- get_more_command_total (integer)
|
||||
- getmores (integer)
|
||||
- insert_command_failed (integer)
|
||||
- insert_command_total (integer)
|
||||
- inserts (integer)
|
||||
- jumbo_chunks (integer)
|
||||
- latency_commands_count (integer)
|
||||
- latency_commands (integer)
|
||||
- latency_reads_count (integer)
|
||||
- latency_reads (integer)
|
||||
- latency_writes_count (integer)
|
||||
- latency_writes (integer)
|
||||
- member_status (string)
|
||||
- net_in_bytes_count (integer)
|
||||
- net_out_bytes_count (integer)
|
||||
- open_connections (integer)
|
||||
- operation_scan_and_order (integer)
|
||||
- operation_write_conflicts (integer)
|
||||
- page_faults (integer)
|
||||
- percent_cache_dirty (float)
|
||||
- percent_cache_used (float)
|
||||
- queries (integer)
|
||||
- queued_reads (integer)
|
||||
- queued_writes (integer)
|
||||
- repl_apply_batches_num (integer)
|
||||
- repl_apply_batches_total_millis (integer)
|
||||
- repl_apply_ops (integer)
|
||||
- repl_buffer_count (integer)
|
||||
- repl_buffer_size_bytes (integer)
|
||||
- repl_commands (integer)
|
||||
- repl_deletes (integer)
|
||||
- repl_executor_pool_in_progress_count (integer)
|
||||
- repl_executor_queues_network_in_progress (integer)
|
||||
- repl_executor_queues_sleepers (integer)
|
||||
- repl_executor_unsignaled_events (integer)
|
||||
- repl_getmores (integer)
|
||||
- repl_inserts (integer)
|
||||
- repl_lag (integer)
|
||||
- repl_network_bytes (integer)
|
||||
- repl_network_getmores_num (integer)
|
||||
- repl_network_getmores_total_millis (integer)
|
||||
- repl_network_ops (integer)
|
||||
- repl_queries (integer)
|
||||
- repl_updates (integer)
|
||||
- repl_oplog_window_sec (integer)
|
||||
- repl_state (integer)
|
||||
- repl_member_health (integer)
|
||||
- repl_health_avg (float)
|
||||
- resident_megabytes (integer)
|
||||
- state (string)
|
||||
- storage_freelist_search_bucket_exhausted (integer)
|
||||
- storage_freelist_search_requests (integer)
|
||||
- storage_freelist_search_scanned (integer)
|
||||
- tcmalloc_central_cache_free_bytes (integer)
|
||||
- tcmalloc_current_allocated_bytes (integer)
|
||||
- tcmalloc_current_total_thread_cache_bytes (integer)
|
||||
- tcmalloc_heap_size (integer)
|
||||
- tcmalloc_max_total_thread_cache_bytes (integer)
|
||||
- tcmalloc_pageheap_commit_count (integer)
|
||||
- tcmalloc_pageheap_committed_bytes (integer)
|
||||
- tcmalloc_pageheap_decommit_count (integer)
|
||||
- tcmalloc_pageheap_free_bytes (integer)
|
||||
- tcmalloc_pageheap_reserve_count (integer)
|
||||
- tcmalloc_pageheap_scavenge_count (integer)
|
||||
- tcmalloc_pageheap_total_commit_bytes (integer)
|
||||
- tcmalloc_pageheap_total_decommit_bytes (integer)
|
||||
- tcmalloc_pageheap_total_reserve_bytes (integer)
|
||||
- tcmalloc_pageheap_unmapped_bytes (integer)
|
||||
- tcmalloc_spinlock_total_delay_ns (integer)
|
||||
- tcmalloc_thread_cache_free_bytes (integer)
|
||||
- tcmalloc_total_free_bytes (integer)
|
||||
- tcmalloc_transfer_cache_free_bytes (integer)
|
||||
- total_available (integer)
|
||||
- total_created (integer)
|
||||
- total_docs_scanned (integer)
|
||||
- total_in_use (integer)
|
||||
- total_keys_scanned (integer)
|
||||
- total_refreshing (integer)
|
||||
- total_tickets_reads (integer)
|
||||
- total_tickets_writes (integer)
|
||||
- ttl_deletes (integer)
|
||||
- ttl_passes (integer)
|
||||
- update_command_failed (integer)
|
||||
- update_command_total (integer)
|
||||
- updates (integer)
|
||||
- uptime_ns (integer)
|
||||
- version (string)
|
||||
- vsize_megabytes (integer)
|
||||
- wt_connection_files_currently_open (integer)
|
||||
- wt_data_handles_currently_active (integer)
|
||||
- wtcache_app_threads_page_read_count (integer)
|
||||
- wtcache_app_threads_page_read_time (integer)
|
||||
- wtcache_app_threads_page_write_count (integer)
|
||||
- wtcache_bytes_read_into (integer)
|
||||
- wtcache_bytes_written_from (integer)
|
||||
- wtcache_pages_read_into (integer)
|
||||
- wtcache_pages_requested_from (integer)
|
||||
- wtcache_current_bytes (integer)
|
||||
- wtcache_max_bytes_configured (integer)
|
||||
- wtcache_internal_pages_evicted (integer)
|
||||
- wtcache_modified_pages_evicted (integer)
|
||||
- wtcache_unmodified_pages_evicted (integer)
|
||||
- wtcache_pages_evicted_by_app_thread (integer)
|
||||
- wtcache_pages_queued_for_eviction (integer)
|
||||
- wtcache_server_evicting_pages (integer)
|
||||
- wtcache_tracked_dirty_bytes (integer)
|
||||
- wtcache_worker_thread_evictingpages (integer)
|
||||
- commands_per_sec (integer, deprecated in 1.10; use `commands`)
|
||||
- cursor_no_timeout (integer, opened/sec, deprecated in 1.10; use `cursor_no_timeout_count`)
|
||||
- cursor_pinned (integer, opened/sec, deprecated in 1.10; use `cursor_pinned_count`)
|
||||
- cursor_timed_out (integer, opened/sec, deprecated in 1.10; use `cursor_timed_out_count`)
|
||||
- cursor_total (integer, opened/sec, deprecated in 1.10; use `cursor_total_count`)
|
||||
- deletes_per_sec (integer, deprecated in 1.10; use `deletes`)
|
||||
- flushes_per_sec (integer, deprecated in 1.10; use `flushes`)
|
||||
- getmores_per_sec (integer, deprecated in 1.10; use `getmores`)
|
||||
- inserts_per_sec (integer, deprecated in 1.10; use `inserts`)
|
||||
- net_in_bytes (integer, bytes/sec, deprecated in 1.10; use `net_in_bytes_count`)
|
||||
- net_out_bytes (integer, bytes/sec, deprecated in 1.10; use `net_out_bytes_count`)
|
||||
- queries_per_sec (integer, deprecated in 1.10; use `queries`)
|
||||
- repl_commands_per_sec (integer, deprecated in 1.10; use `repl_commands`)
|
||||
- repl_deletes_per_sec (integer, deprecated in 1.10; use `repl_deletes`)
|
||||
- repl_getmores_per_sec (integer, deprecated in 1.10; use `repl_getmores`)
|
||||
- repl_inserts_per_sec (integer, deprecated in 1.10; use `repl_inserts`)
|
||||
- repl_queries_per_sec (integer, deprecated in 1.10; use `repl_queries`)
|
||||
- repl_updates_per_sec (integer, deprecated in 1.10; use `repl_updates`)
|
||||
- ttl_deletes_per_sec (integer, deprecated in 1.10; use `ttl_deletes`)
|
||||
- ttl_passes_per_sec (integer, deprecated in 1.10; use `ttl_passes`)
|
||||
- updates_per_sec (integer, deprecated in 1.10; use `updates`)
|
||||
|
||||
- mongodb_db_stats
|
||||
- tags:
|
||||
- db_name
|
||||
- hostname
|
||||
- fields:
|
||||
- avg_obj_size (float)
|
||||
- collections (integer)
|
||||
- data_size (integer)
|
||||
- index_size (integer)
|
||||
- indexes (integer)
|
||||
- num_extents (integer)
|
||||
- objects (integer)
|
||||
- ok (integer)
|
||||
- storage_size (integer)
|
||||
- type (string)
|
||||
- fs_used_size (integer)
|
||||
- fs_total_size (integer)
|
||||
|
||||
- mongodb_col_stats
|
||||
- tags:
|
||||
- hostname
|
||||
- collection
|
||||
- db_name
|
||||
- fields:
|
||||
- size (integer)
|
||||
- avg_obj_size (integer)
|
||||
- storage_size (integer)
|
||||
- total_index_size (integer)
|
||||
- ok (integer)
|
||||
- count (integer)
|
||||
- type (string)
|
||||
|
||||
- mongodb_shard_stats
|
||||
- tags:
|
||||
- hostname
|
||||
- fields:
|
||||
- in_use (integer)
|
||||
- available (integer)
|
||||
- created (integer)
|
||||
- refreshing (integer)
|
||||
|
||||
- mongodb_top_stats
|
||||
- tags:
|
||||
- collection
|
||||
- fields:
|
||||
- total_time (integer)
|
||||
- total_count (integer)
|
||||
- read_lock_time (integer)
|
||||
- read_lock_count (integer)
|
||||
- write_lock_time (integer)
|
||||
- write_lock_count (integer)
|
||||
- queries_time (integer)
|
||||
- queries_count (integer)
|
||||
- get_more_time (integer)
|
||||
- get_more_count (integer)
|
||||
- insert_time (integer)
|
||||
- insert_count (integer)
|
||||
- update_time (integer)
|
||||
- update_count (integer)
|
||||
- remove_time (integer)
|
||||
- remove_count (integer)
|
||||
- commands_time (integer)
|
||||
- commands_count (integer)
|
||||
|
||||
## Example Output
|
||||
|
||||
```text
|
||||
mongodb,hostname=127.0.0.1:27017 active_reads=1i,active_writes=0i,aggregate_command_failed=0i,aggregate_command_total=0i,assert_msg=0i,assert_regular=0i,assert_rollovers=0i,assert_user=0i,assert_warning=0i,available_reads=127i,available_writes=128i,commands=65i,commands_per_sec=4i,connections_available=51199i,connections_current=1i,connections_total_created=5i,count_command_failed=0i,count_command_total=7i,cursor_no_timeout=0i,cursor_no_timeout_count=0i,cursor_pinned=0i,cursor_pinned_count=0i,cursor_timed_out=0i,cursor_timed_out_count=0i,cursor_total=0i,cursor_total_count=0i,delete_command_failed=0i,delete_command_total=1i,deletes=1i,deletes_per_sec=0i,distinct_command_failed=0i,distinct_command_total=0i,document_deleted=0i,document_inserted=0i,document_returned=0i,document_updated=0i,find_and_modify_command_failed=0i,find_and_modify_command_total=0i,find_command_failed=0i,find_command_total=1i,flushes=52i,flushes_per_sec=0i,flushes_total_time_ns=364000000i,get_more_command_failed=0i,get_more_command_total=0i,getmores=0i,getmores_per_sec=0i,insert_command_failed=0i,insert_command_total=0i,inserts=0i,inserts_per_sec=0i,jumbo_chunks=0i,latency_commands=5740i,latency_commands_count=46i,latency_reads=348i,latency_reads_count=7i,latency_writes=0i,latency_writes_count=0i,net_in_bytes=296i,net_in_bytes_count=4262i,net_out_bytes=29322i,net_out_bytes_count=242103i,open_connections=1i,operation_scan_and_order=0i,operation_write_conflicts=0i,page_faults=1i,percent_cache_dirty=0,percent_cache_used=0,queries=1i,queries_per_sec=0i,queued_reads=0i,queued_writes=0i,resident_megabytes=33i,storage_freelist_search_bucket_exhausted=0i,storage_freelist_search_requests=0i,storage_freelist_search_scanned=0i,tcmalloc_central_cache_free_bytes=0i,tcmalloc_current_allocated_bytes=0i,tcmalloc_current_total_thread_cache_bytes=0i,tcmalloc_heap_size=0i,tcmalloc_max_total_thread_cache_bytes=0i,tcmalloc_pageheap_commit_count=0i,tcmalloc_pageheap_committed_bytes=0i,tcmalloc_pageheap_decommit_count=0
i,tcmalloc_pageheap_free_bytes=0i,tcmalloc_pageheap_reserve_count=0i,tcmalloc_pageheap_scavenge_count=0i,tcmalloc_pageheap_total_commit_bytes=0i,tcmalloc_pageheap_total_decommit_bytes=0i,tcmalloc_pageheap_total_reserve_bytes=0i,tcmalloc_pageheap_unmapped_bytes=0i,tcmalloc_spinlock_total_delay_ns=0i,tcmalloc_thread_cache_free_bytes=0i,tcmalloc_total_free_bytes=0i,tcmalloc_transfer_cache_free_bytes=0i,total_available=0i,total_created=0i,total_docs_scanned=0i,total_in_use=0i,total_keys_scanned=0i,total_refreshing=0i,total_tickets_reads=128i,total_tickets_writes=128i,ttl_deletes=0i,ttl_deletes_per_sec=0i,ttl_passes=51i,ttl_passes_per_sec=0i,update_command_failed=0i,update_command_total=0i,updates=0i,updates_per_sec=0i,uptime_ns=6135152000000i,version="4.0.19",vsize_megabytes=5088i,wt_connection_files_currently_open=13i,wt_data_handles_currently_active=18i,wtcache_app_threads_page_read_count=99i,wtcache_app_threads_page_read_time=44528i,wtcache_app_threads_page_write_count=19i,wtcache_bytes_read_into=3248195i,wtcache_bytes_written_from=170612i,wtcache_current_bytes=3648788i,wtcache_internal_pages_evicted=0i,wtcache_max_bytes_configured=8053063680i,wtcache_modified_pages_evicted=0i,wtcache_pages_evicted_by_app_thread=0i,wtcache_pages_queued_for_eviction=0i,wtcache_pages_read_into=234i,wtcache_pages_requested_from=18235i,wtcache_server_evicting_pages=0i,wtcache_tracked_dirty_bytes=0i,wtcache_unmodified_pages_evicted=0i,wtcache_worker_thread_evictingpages=0i 1595691605000000000
|
||||
mongodb,hostname=127.0.0.1:27017,node_type=PRI,rs_name=rs0 active_reads=1i,active_writes=0i,aggregate_command_failed=0i,aggregate_command_total=0i,assert_msg=0i,assert_regular=0i,assert_rollovers=0i,assert_user=25i,assert_warning=0i,available_reads=127i,available_writes=128i,commands=345i,commands_per_sec=4i,connections_available=838853i,connections_current=7i,connections_total_created=13i,count_command_failed=0i,count_command_total=5i,cursor_no_timeout=0i,cursor_no_timeout_count=0i,cursor_pinned=0i,cursor_pinned_count=2i,cursor_timed_out=0i,cursor_timed_out_count=0i,cursor_total=0i,cursor_total_count=4i,delete_command_failed=0i,delete_command_total=0i,deletes=0i,deletes_per_sec=0i,distinct_command_failed=0i,distinct_command_total=0i,document_deleted=0i,document_inserted=2i,document_returned=56i,document_updated=0i,find_and_modify_command_failed=0i,find_and_modify_command_total=0i,find_command_failed=0i,find_command_total=23i,flushes=4i,flushes_per_sec=0i,flushes_total_time_ns=43000000i,get_more_command_failed=0i,get_more_command_total=88i,getmores=88i,getmores_per_sec=0i,insert_command_failed=0i,insert_command_total=2i,inserts=2i,inserts_per_sec=0i,jumbo_chunks=0i,latency_commands=82532i,latency_commands_count=337i,latency_reads=30633i,latency_reads_count=111i,latency_writes=0i,latency_writes_count=0i,member_status="PRI",net_in_bytes=636i,net_in_bytes_count=172300i,net_out_bytes=38849i,net_out_bytes_count=335459i,open_connections=7i,operation_scan_and_order=1i,operation_write_conflicts=0i,page_faults=1i,percent_cache_dirty=0,percent_cache_used=0,queries=23i,queries_per_sec=2i,queued_reads=0i,queued_writes=0i,repl_apply_batches_num=0i,repl_apply_batches_total_millis=0i,repl_apply_ops=0i,repl_buffer_count=0i,repl_buffer_size_bytes=0i,repl_commands=0i,repl_commands_per_sec=0i,repl_deletes=0i,repl_deletes_per_sec=0i,repl_executor_pool_in_progress_count=0i,repl_executor_queues_network_in_progress=0i,repl_executor_queues_sleepers=3i,repl_executor_unsignaled_events=0i,rep
l_getmores=0i,repl_getmores_per_sec=0i,repl_inserts=0i,repl_inserts_per_sec=0i,repl_lag=0i,repl_network_bytes=0i,repl_network_getmores_num=0i,repl_network_getmores_total_millis=0i,repl_network_ops=0i,repl_oplog_window_sec=140i,repl_queries=0i,repl_queries_per_sec=0i,repl_state=1i,repl_updates=0i,repl_updates_per_sec=0i,resident_megabytes=81i,state="PRIMARY",storage_freelist_search_bucket_exhausted=0i,storage_freelist_search_requests=0i,storage_freelist_search_scanned=0i,tcmalloc_central_cache_free_bytes=322128i,tcmalloc_current_allocated_bytes=143566680i,tcmalloc_current_total_thread_cache_bytes=1098968i,tcmalloc_heap_size=181317632i,tcmalloc_max_total_thread_cache_bytes=260046848i,tcmalloc_pageheap_commit_count=53i,tcmalloc_pageheap_committed_bytes=149106688i,tcmalloc_pageheap_decommit_count=1i,tcmalloc_pageheap_free_bytes=3244032i,tcmalloc_pageheap_reserve_count=51i,tcmalloc_pageheap_scavenge_count=1i,tcmalloc_pageheap_total_commit_bytes=183074816i,tcmalloc_pageheap_total_decommit_bytes=33968128i,tcmalloc_pageheap_total_reserve_bytes=181317632i,tcmalloc_pageheap_unmapped_bytes=32210944i,tcmalloc_spinlock_total_delay_ns=0i,tcmalloc_thread_cache_free_bytes=1098968i,tcmalloc_total_free_bytes=2295976i,tcmalloc_transfer_cache_free_bytes=874880i,total_available=0i,total_created=0i,total_docs_scanned=56i,total_in_use=0i,total_keys_scanned=2i,total_refreshing=0i,total_tickets_reads=128i,total_tickets_writes=128i,ttl_deletes=0i,ttl_deletes_per_sec=0i,ttl_passes=2i,ttl_passes_per_sec=0i,update_command_failed=0i,update_command_total=0i,updates=0i,updates_per_sec=0i,uptime_ns=166481000000i,version="4.0.19",vsize_megabytes=1482i,wt_connection_files_currently_open=26i,wt_data_handles_currently_active=44i,wtcache_app_threads_page_read_count=0i,wtcache_app_threads_page_read_time=0i,wtcache_app_threads_page_write_count=56i,wtcache_bytes_read_into=0i,wtcache_bytes_written_from=130403i,wtcache_current_bytes=100312i,wtcache_internal_pages_evicted=0i,wtcache_max_bytes_configured=50646
2208i,wtcache_modified_pages_evicted=0i,wtcache_pages_evicted_by_app_thread=0i,wtcache_pages_queued_for_eviction=0i,wtcache_pages_read_into=0i,wtcache_pages_requested_from=2085i,wtcache_server_evicting_pages=0i,wtcache_tracked_dirty_bytes=63929i,wtcache_unmodified_pages_evicted=0i,wtcache_worker_thread_evictingpages=0i 1595691605000000000
|
||||
mongodb_db_stats,db_name=admin,hostname=127.0.0.1:27017 avg_obj_size=241,collections=2i,data_size=723i,index_size=49152i,indexes=3i,num_extents=0i,objects=3i,ok=1i,storage_size=53248i,type="db_stat" 1547159491000000000
|
||||
mongodb_db_stats,db_name=local,hostname=127.0.0.1:27017 avg_obj_size=813.9705882352941,collections=6i,data_size=55350i,index_size=102400i,indexes=5i,num_extents=0i,objects=68i,ok=1i,storage_size=204800i,type="db_stat" 1547159491000000000
|
||||
mongodb_col_stats,collection=foo,db_name=local,hostname=127.0.0.1:27017 size=375005928i,avg_obj_size=5494,type="col_stat",storage_size=249307136i,total_index_size=2138112i,ok=1i,count=68251i 1547159491000000000
|
||||
mongodb_shard_stats,hostname=127.0.0.1:27017,in_use=3i,available=3i,created=4i,refreshing=0i 1522799074000000000
|
||||
mongodb_top_stats,collection=foo,total_time=1471,total_count=158,read_lock_time=49614,read_lock_count=657,write_lock_time=49125456,write_lock_count=9841,queries_time=174,queries_count=495,get_more_time=498,get_more_count=46,insert_time=2651,insert_count=1265,update_time=0,update_count=0,remove_time=0,remove_count=0,commands_time=498611,commands_count=4615
|
||||
```
|
16
plugins/inputs/mongodb/dev/docker-compose.yml
Normal file
16
plugins/inputs/mongodb/dev/docker-compose.yml
Normal file
|
@ -0,0 +1,16 @@
|
|||
version: '3'

services:
  # MongoDB instance the plugin is pointed at during development.
  mongodb:
    image: mongo

  # Runs the locally-built telegraf binary (mounted from the repo root)
  # against the mongodb service using telegraf.conf from this directory.
  telegraf:
    image: glinton/scratch
    volumes:
      - ./telegraf.conf:/telegraf.conf
      - ../../../../telegraf:/telegraf
    depends_on:
      - mongodb
    entrypoint:
      - /telegraf
      - --config
      - /telegraf.conf
|
9
plugins/inputs/mongodb/dev/telegraf.conf
Normal file
9
plugins/inputs/mongodb/dev/telegraf.conf
Normal file
|
@ -0,0 +1,9 @@
|
|||
# Development configuration: poll the dockerized MongoDB every second and
# print the gathered metrics to stdout.
[agent]
interval = "1s"
flush_interval = "3s"

[[inputs.mongodb]]
servers = ["mongodb://mongodb:27017"]

[[outputs.file]]
files = ["stdout"]
|
200
plugins/inputs/mongodb/mongodb.go
Normal file
200
plugins/inputs/mongodb/mongodb.go
Normal file
|
@ -0,0 +1,200 @@
|
|||
//go:generate ../../../tools/readme_config_includer/generator
|
||||
package mongodb
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
_ "embed"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.mongodb.org/mongo-driver/mongo"
|
||||
"go.mongodb.org/mongo-driver/mongo/options"
|
||||
"go.mongodb.org/mongo-driver/mongo/readpref"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/internal/choice"
|
||||
common_tls "github.com/influxdata/telegraf/plugins/common/tls"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
//go:embed sample.conf
|
||||
var sampleConfig string
|
||||
|
||||
var disconnectedServersBehaviors = []string{"error", "skip"}
|
||||
|
||||
type MongoDB struct {
|
||||
Servers []string `toml:"servers"`
|
||||
GatherClusterStatus bool `toml:"gather_cluster_status"`
|
||||
GatherPerDBStats bool `toml:"gather_perdb_stats"`
|
||||
GatherColStats bool `toml:"gather_col_stats"`
|
||||
GatherTopStat bool `toml:"gather_top_stat"`
|
||||
DisconnectedServersBehavior string `toml:"disconnected_servers_behavior"`
|
||||
ColStatsDBs []string `toml:"col_stats_dbs"`
|
||||
common_tls.ClientConfig
|
||||
Ssl ssl
|
||||
|
||||
Log telegraf.Logger `toml:"-"`
|
||||
|
||||
clients []*server
|
||||
tlsConfig *tls.Config
|
||||
}
|
||||
|
||||
type ssl struct {
|
||||
Enabled bool `toml:"ssl_enabled" deprecated:"1.3.0;1.35.0;use 'tls_*' options instead"`
|
||||
CaCerts []string `toml:"cacerts" deprecated:"1.3.0;1.35.0;use 'tls_ca' instead"`
|
||||
}
|
||||
|
||||
func (*MongoDB) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (m *MongoDB) Init() error {
|
||||
if m.DisconnectedServersBehavior == "" {
|
||||
m.DisconnectedServersBehavior = "error"
|
||||
}
|
||||
|
||||
if err := choice.Check(m.DisconnectedServersBehavior, disconnectedServersBehaviors); err != nil {
|
||||
return fmt.Errorf("disconnected_servers_behavior: %w", err)
|
||||
}
|
||||
|
||||
if m.Ssl.Enabled {
|
||||
// Deprecated TLS config
|
||||
m.tlsConfig = &tls.Config{
|
||||
InsecureSkipVerify: m.ClientConfig.InsecureSkipVerify,
|
||||
}
|
||||
if len(m.Ssl.CaCerts) == 0 {
|
||||
return errors.New("you must explicitly set insecure_skip_verify to skip certificate validation")
|
||||
}
|
||||
|
||||
roots := x509.NewCertPool()
|
||||
for _, caCert := range m.Ssl.CaCerts {
|
||||
if ok := roots.AppendCertsFromPEM([]byte(caCert)); !ok {
|
||||
return errors.New("failed to parse root certificate")
|
||||
}
|
||||
}
|
||||
m.tlsConfig.RootCAs = roots
|
||||
} else {
|
||||
var err error
|
||||
m.tlsConfig, err = m.ClientConfig.TLSConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if len(m.Servers) == 0 {
|
||||
m.Servers = []string{"mongodb://127.0.0.1:27017"}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Start runs after init and setup mongodb connections
|
||||
func (m *MongoDB) Start(telegraf.Accumulator) error {
|
||||
for _, connURL := range m.Servers {
|
||||
if err := m.setupConnection(connURL); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MongoDB) Gather(acc telegraf.Accumulator) error {
|
||||
var wg sync.WaitGroup
|
||||
for _, client := range m.clients {
|
||||
wg.Add(1)
|
||||
go func(srv *server) {
|
||||
defer wg.Done()
|
||||
if m.DisconnectedServersBehavior == "skip" {
|
||||
if err := srv.ping(); err != nil {
|
||||
m.Log.Debugf("Failed to ping server: %s", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
err := srv.gatherData(acc, m.GatherClusterStatus, m.GatherPerDBStats, m.GatherColStats, m.GatherTopStat, m.ColStatsDBs)
|
||||
if err != nil {
|
||||
m.Log.Errorf("Failed to gather data: %s", err)
|
||||
}
|
||||
}(client)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop disconnects mongo connections when stop or reload
|
||||
func (m *MongoDB) Stop() {
|
||||
for _, server := range m.clients {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
if err := server.client.Disconnect(ctx); err != nil {
|
||||
m.Log.Errorf("Disconnecting from %q failed: %v", server.hostname, err)
|
||||
}
|
||||
cancel()
|
||||
}
|
||||
}
|
||||
|
||||
func (m *MongoDB) setupConnection(connURL string) error {
|
||||
if !strings.HasPrefix(connURL, "mongodb://") && !strings.HasPrefix(connURL, "mongodb+srv://") {
|
||||
// Preserve backwards compatibility for hostnames without a
|
||||
// scheme, broken in go 1.8. Remove in Telegraf 2.0
|
||||
connURL = "mongodb://" + connURL
|
||||
m.Log.Warnf("Using %q as connection URL; please update your configuration to use an URL", connURL)
|
||||
}
|
||||
|
||||
u, err := url.Parse(connURL)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to parse connection URL: %w", err)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
opts := options.Client().ApplyURI(connURL)
|
||||
if m.tlsConfig != nil {
|
||||
opts.TLSConfig = m.tlsConfig
|
||||
}
|
||||
if opts.ReadPreference == nil {
|
||||
opts.ReadPreference = readpref.Nearest()
|
||||
}
|
||||
|
||||
client, err := mongo.Connect(ctx, opts)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to connect to MongoDB: %w", err)
|
||||
}
|
||||
|
||||
err = client.Ping(ctx, opts.ReadPreference)
|
||||
if err != nil {
|
||||
if m.DisconnectedServersBehavior == "error" {
|
||||
return fmt.Errorf("unable to ping MongoDB: %w", err)
|
||||
}
|
||||
|
||||
m.Log.Errorf("Unable to ping MongoDB: %s", err)
|
||||
}
|
||||
|
||||
server := &server{
|
||||
client: client,
|
||||
hostname: u.Host,
|
||||
log: m.Log,
|
||||
}
|
||||
m.clients = append(m.clients, server)
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("mongodb", func() telegraf.Input {
|
||||
return &MongoDB{
|
||||
GatherClusterStatus: true,
|
||||
GatherPerDBStats: false,
|
||||
GatherColStats: false,
|
||||
GatherTopStat: false,
|
||||
ColStatsDBs: []string{"local"},
|
||||
}
|
||||
})
|
||||
}
|
478
plugins/inputs/mongodb/mongodb_data.go
Normal file
478
plugins/inputs/mongodb/mongodb_data.go
Normal file
|
@ -0,0 +1,478 @@
|
|||
package mongodb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
)
|
||||
|
||||
type mongoDBData struct {
|
||||
StatLine *statLine
|
||||
Fields map[string]interface{}
|
||||
Tags map[string]string
|
||||
DBData []bbData
|
||||
ColData []colData
|
||||
ShardHostData []bbData
|
||||
TopStatsData []bbData
|
||||
}
|
||||
|
||||
type bbData struct {
|
||||
Name string
|
||||
Fields map[string]interface{}
|
||||
}
|
||||
|
||||
type colData struct {
|
||||
Name string
|
||||
DBName string
|
||||
Fields map[string]interface{}
|
||||
}
|
||||
|
||||
func newMongodbData(statLine *statLine, tags map[string]string) *mongoDBData {
|
||||
return &mongoDBData{
|
||||
StatLine: statLine,
|
||||
Tags: tags,
|
||||
Fields: make(map[string]interface{}),
|
||||
}
|
||||
}
|
||||
|
||||
// defaultStats maps emitted field names to the corresponding statLine struct
// member. Fields ending in "_per_sec" are deprecated since 1.10 in favor of
// their "*Cnt" counterparts.
var defaultStats = map[string]string{
	"uptime_ns":                 "UptimeNanos",
	"inserts":                   "InsertCnt",
	"inserts_per_sec":           "Insert",
	"queries":                   "QueryCnt",
	"queries_per_sec":           "Query",
	"updates":                   "UpdateCnt",
	"updates_per_sec":           "Update",
	"deletes":                   "DeleteCnt",
	"deletes_per_sec":           "Delete",
	"getmores":                  "GetMoreCnt",
	"getmores_per_sec":          "GetMore",
	"commands":                  "CommandCnt",
	"commands_per_sec":          "Command",
	"flushes":                   "FlushesCnt",
	"flushes_per_sec":           "Flushes",
	"flushes_total_time_ns":     "FlushesTotalTime",
	"vsize_megabytes":           "Virtual",
	"resident_megabytes":        "Resident",
	"queued_reads":              "QueuedReaders",
	"queued_writes":             "QueuedWriters",
	"active_reads":              "ActiveReaders",
	"active_writes":             "ActiveWriters",
	"available_reads":           "AvailableReaders",
	"available_writes":          "AvailableWriters",
	"total_tickets_reads":       "TotalTicketsReaders",
	"total_tickets_writes":      "TotalTicketsWriters",
	"net_in_bytes_count":        "NetInCnt",
	"net_in_bytes":              "NetIn",
	"net_out_bytes_count":       "NetOutCnt",
	"net_out_bytes":             "NetOut",
	"open_connections":          "NumConnections",
	"ttl_deletes":               "DeletedDocumentsCnt",
	"ttl_deletes_per_sec":       "DeletedDocuments",
	"ttl_passes":                "PassesCnt",
	"ttl_passes_per_sec":        "Passes",
	"cursor_timed_out":          "TimedOutC",
	"cursor_timed_out_count":    "TimedOutCCnt",
	"cursor_no_timeout":         "NoTimeoutC",
	"cursor_no_timeout_count":   "NoTimeoutCCnt",
	"cursor_pinned":             "PinnedC",
	"cursor_pinned_count":       "PinnedCCnt",
	"cursor_total":              "TotalC",
	"cursor_total_count":        "TotalCCnt",
	"document_deleted":          "DeletedD",
	"document_inserted":         "InsertedD",
	"document_returned":         "ReturnedD",
	"document_updated":          "UpdatedD",
	"connections_current":       "CurrentC",
	"connections_available":     "AvailableC",
	"connections_total_created": "TotalCreatedC",
	"operation_scan_and_order":  "ScanAndOrderOp",
	"operation_write_conflicts": "WriteConflictsOp",
	"total_keys_scanned":        "TotalKeysScanned",
	"total_docs_scanned":        "TotalObjectsScanned",
}
|
||||
|
||||
// The tables below map a Telegraf field name (key) to the exported
// statLine struct-field name (value) that it is read from via reflection
// in (*mongoDBData).addStat and the add*Stats helpers.

// defaultAssertsStats: serverStatus assertion counters.
var defaultAssertsStats = map[string]string{
	"assert_regular":   "Regular",
	"assert_warning":   "Warning",
	"assert_msg":       "Msg",
	"assert_user":      "User",
	"assert_rollovers": "Rollovers",
}

// defaultCommandsStats: per-command total/failed counters from
// serverStatus metrics.commands.
var defaultCommandsStats = map[string]string{
	"aggregate_command_total":        "AggregateCommandTotal",
	"aggregate_command_failed":       "AggregateCommandFailed",
	"count_command_total":            "CountCommandTotal",
	"count_command_failed":           "CountCommandFailed",
	"delete_command_total":           "DeleteCommandTotal",
	"delete_command_failed":          "DeleteCommandFailed",
	"distinct_command_total":         "DistinctCommandTotal",
	"distinct_command_failed":        "DistinctCommandFailed",
	"find_command_total":             "FindCommandTotal",
	"find_command_failed":            "FindCommandFailed",
	"find_and_modify_command_total":  "FindAndModifyCommandTotal",
	"find_and_modify_command_failed": "FindAndModifyCommandFailed",
	"get_more_command_total":         "GetMoreCommandTotal",
	"get_more_command_failed":        "GetMoreCommandFailed",
	"insert_command_total":           "InsertCommandTotal",
	"insert_command_failed":          "InsertCommandFailed",
	"update_command_total":           "UpdateCommandTotal",
	"update_command_failed":          "UpdateCommandFailed",
}

// defaultLatencyStats: opLatencies counters; only emitted when reads
// were observed (see addDefaultStats).
var defaultLatencyStats = map[string]string{
	"latency_writes_count":   "WriteOpsCnt",
	"latency_writes":         "WriteLatency",
	"latency_reads_count":    "ReadOpsCnt",
	"latency_reads":          "ReadLatency",
	"latency_commands_count": "CommandOpsCnt",
	"latency_commands":       "CommandLatency",
}

// defaultReplStats: replica-set member counters and state; only emitted
// when the node reports a NodeType (see addDefaultStats).
var defaultReplStats = map[string]string{
	"repl_inserts":                             "InsertRCnt",
	"repl_inserts_per_sec":                     "InsertR",
	"repl_queries":                             "QueryRCnt",
	"repl_queries_per_sec":                     "QueryR",
	"repl_updates":                             "UpdateRCnt",
	"repl_updates_per_sec":                     "UpdateR",
	"repl_deletes":                             "DeleteRCnt",
	"repl_deletes_per_sec":                     "DeleteR",
	"repl_getmores":                            "GetMoreRCnt",
	"repl_getmores_per_sec":                    "GetMoreR",
	"repl_commands":                            "CommandRCnt",
	"repl_commands_per_sec":                    "CommandR",
	"member_status":                            "NodeType",
	"state":                                    "NodeState",
	"repl_state":                               "NodeStateInt",
	"repl_member_health":                       "NodeHealthInt",
	"repl_health_avg":                          "ReplHealthAvg",
	"repl_lag":                                 "ReplLag",
	"repl_network_bytes":                       "ReplNetworkBytes",
	"repl_network_getmores_num":                "ReplNetworkGetmoresNum",
	"repl_network_getmores_total_millis":       "ReplNetworkGetmoresTotalMillis",
	"repl_network_ops":                         "ReplNetworkOps",
	"repl_buffer_count":                        "ReplBufferCount",
	"repl_buffer_size_bytes":                   "ReplBufferSizeBytes",
	"repl_apply_batches_num":                   "ReplApplyBatchesNum",
	"repl_apply_batches_total_millis":          "ReplApplyBatchesTotalMillis",
	"repl_apply_ops":                           "ReplApplyOps",
	"repl_executor_pool_in_progress_count":     "ReplExecutorPoolInProgressCount",
	"repl_executor_queues_network_in_progress": "ReplExecutorQueuesNetworkInProgress",
	"repl_executor_queues_sleepers":            "ReplExecutorQueuesSleepers",
	"repl_executor_unsignaled_events":          "ReplExecutorUnsignaledEvents",
}

// defaultClusterStats: sharded-cluster health indicators.
var defaultClusterStats = map[string]string{
	"jumbo_chunks": "JumboChunksCount",
}

// defaultShardStats: aggregate shard connection-pool counters.
var defaultShardStats = map[string]string{
	"total_in_use":     "TotalInUse",
	"total_available":  "TotalAvailable",
	"total_created":    "TotalCreated",
	"total_refreshing": "TotalRefreshing",
}

// shardHostStats: per-host shard connection-pool counters, read from
// shardHostStatLine values in addShardHostStats.
var shardHostStats = map[string]string{
	"in_use":     "InUse",
	"available":  "Available",
	"created":    "Created",
	"refreshing": "Refreshing",
}

// mmapStats: metrics specific to the mmapv1/rocksdb storage engines.
var mmapStats = map[string]string{
	"mapped_megabytes":     "Mapped",
	"non-mapped_megabytes": "NonMapped",
	"page_faults":          "FaultsCnt",
	"page_faults_per_sec":  "Faults",
}

// wiredTigerStats: WiredTiger cache ratios; converted from fractions to
// percentages in addDefaultStats rather than copied verbatim.
var wiredTigerStats = map[string]string{
	"percent_cache_dirty": "CacheDirtyPercent",
	"percent_cache_used":  "CacheUsedPercent",
}

// wiredTigerExtStats: extended WiredTiger cache counters.
var wiredTigerExtStats = map[string]string{
	"wtcache_tracked_dirty_bytes":          "TrackedDirtyBytes",
	"wtcache_current_bytes":                "CurrentCachedBytes",
	"wtcache_max_bytes_configured":         "MaxBytesConfigured",
	"wtcache_app_threads_page_read_count":  "AppThreadsPageReadCount",
	"wtcache_app_threads_page_read_time":   "AppThreadsPageReadTime",
	"wtcache_app_threads_page_write_count": "AppThreadsPageWriteCount",
	"wtcache_bytes_written_from":           "BytesWrittenFrom",
	"wtcache_bytes_read_into":              "BytesReadInto",
	"wtcache_pages_evicted_by_app_thread":  "PagesEvictedByAppThread",
	"wtcache_pages_queued_for_eviction":    "PagesQueuedForEviction",
	"wtcache_pages_read_into":              "PagesReadIntoCache",
	"wtcache_pages_written_from":           "PagesWrittenFromCache",
	"wtcache_pages_requested_from":         "PagesRequestedFromCache",
	"wtcache_server_evicting_pages":        "ServerEvictingPages",
	"wtcache_worker_thread_evictingpages":  "WorkerThreadEvictingPages",
	"wtcache_internal_pages_evicted":       "InternalPagesEvicted",
	"wtcache_modified_pages_evicted":       "ModifiedPagesEvicted",
	"wtcache_unmodified_pages_evicted":     "UnmodifiedPagesEvicted",
}

// wiredTigerConnectionStats: WiredTiger connection-level counters.
var wiredTigerConnectionStats = map[string]string{
	"wt_connection_files_currently_open": "FilesCurrentlyOpen",
}

// wiredTigerDataHandleStats: WiredTiger data-handle counters.
var wiredTigerDataHandleStats = map[string]string{
	"wt_data_handles_currently_active": "DataHandlesCurrentlyActive",
}

// defaultTCMallocStats: tcmalloc allocator counters.
// NOTE(review): "Comitted" below is misspelled in the statLine field
// name itself; the mapping must keep the misspelling to match the struct.
var defaultTCMallocStats = map[string]string{
	"tcmalloc_current_allocated_bytes":          "TCMallocCurrentAllocatedBytes",
	"tcmalloc_heap_size":                        "TCMallocHeapSize",
	"tcmalloc_central_cache_free_bytes":         "TCMallocCentralCacheFreeBytes",
	"tcmalloc_current_total_thread_cache_bytes": "TCMallocCurrentTotalThreadCacheBytes",
	"tcmalloc_max_total_thread_cache_bytes":     "TCMallocMaxTotalThreadCacheBytes",
	"tcmalloc_total_free_bytes":                 "TCMallocTotalFreeBytes",
	"tcmalloc_transfer_cache_free_bytes":        "TCMallocTransferCacheFreeBytes",
	"tcmalloc_thread_cache_free_bytes":          "TCMallocThreadCacheFreeBytes",
	"tcmalloc_spinlock_total_delay_ns":          "TCMallocSpinLockTotalDelayNanos",
	"tcmalloc_pageheap_free_bytes":              "TCMallocPageheapFreeBytes",
	"tcmalloc_pageheap_unmapped_bytes":          "TCMallocPageheapUnmappedBytes",
	"tcmalloc_pageheap_committed_bytes":         "TCMallocPageheapComittedBytes",
	"tcmalloc_pageheap_scavenge_count":          "TCMallocPageheapScavengeCount",
	"tcmalloc_pageheap_commit_count":            "TCMallocPageheapCommitCount",
	"tcmalloc_pageheap_total_commit_bytes":      "TCMallocPageheapTotalCommitBytes",
	"tcmalloc_pageheap_decommit_count":          "TCMallocPageheapDecommitCount",
	"tcmalloc_pageheap_total_decommit_bytes":    "TCMallocPageheapTotalDecommitBytes",
	"tcmalloc_pageheap_reserve_count":           "TCMallocPageheapReserveCount",
	"tcmalloc_pageheap_total_reserve_bytes":     "TCMallocPageheapTotalReserveBytes",
}

// defaultStorageStats: storage freelist search counters.
var defaultStorageStats = map[string]string{
	"storage_freelist_search_bucket_exhausted": "StorageFreelistSearchBucketExhausted",
	"storage_freelist_search_requests":         "StorageFreelistSearchRequests",
	"storage_freelist_search_scanned":          "StorageFreelistSearchScanned",
}

// dbDataStats: per-database dbStats fields, read from dbStatLine values
// in addDBStats.
var dbDataStats = map[string]string{
	"collections":   "Collections",
	"objects":       "Objects",
	"avg_obj_size":  "AvgObjSize",
	"data_size":     "DataSize",
	"storage_size":  "StorageSize",
	"num_extents":   "NumExtents",
	"indexes":       "Indexes",
	"index_size":    "IndexSize",
	"ok":            "Ok",
	"fs_used_size":  "FsUsedSize",
	"fs_total_size": "FsTotalSize",
}

// colDataStats: per-collection collStats fields, read from colStatLine
// values in addColStats.
var colDataStats = map[string]string{
	"count":            "Count",
	"size":             "Size",
	"avg_obj_size":     "AvgObjSize",
	"storage_size":     "StorageSize",
	"total_index_size": "TotalIndexSize",
	"ok":               "Ok",
}

// topDataStats: per-collection "top" command fields, read from
// topStatLine values in addTopStats.
var topDataStats = map[string]string{
	"total_time":       "TotalTime",
	"total_count":      "TotalCount",
	"read_lock_time":   "ReadLockTime",
	"read_lock_count":  "ReadLockCount",
	"write_lock_time":  "WriteLockTime",
	"write_lock_count": "WriteLockCount",
	"queries_time":     "QueriesTime",
	"queries_count":    "QueriesCount",
	"get_more_time":    "GetMoreTime",
	"get_more_count":   "GetMoreCount",
	"insert_time":      "InsertTime",
	"insert_count":     "InsertCount",
	"update_time":      "UpdateTime",
	"update_count":     "UpdateCount",
	"remove_time":      "RemoveTime",
	"remove_count":     "RemoveCount",
	"commands_time":    "CommandsTime",
	"commands_count":   "CommandsCount",
}
|
||||
|
||||
func (d *mongoDBData) addDBStats() {
|
||||
for i := range d.StatLine.DBStatsLines {
|
||||
dbStat := d.StatLine.DBStatsLines[i]
|
||||
dbStatLine := reflect.ValueOf(&dbStat).Elem()
|
||||
newDBData := &bbData{
|
||||
Name: dbStat.Name,
|
||||
Fields: make(map[string]interface{}),
|
||||
}
|
||||
newDBData.Fields["type"] = "db_stat"
|
||||
for key, value := range dbDataStats {
|
||||
val := dbStatLine.FieldByName(value).Interface()
|
||||
newDBData.Fields[key] = val
|
||||
}
|
||||
d.DBData = append(d.DBData, *newDBData)
|
||||
}
|
||||
}
|
||||
|
||||
func (d *mongoDBData) addColStats() {
|
||||
for i := range d.StatLine.ColStatsLines {
|
||||
colstat := d.StatLine.ColStatsLines[i]
|
||||
colStatLine := reflect.ValueOf(&colstat).Elem()
|
||||
newColData := &colData{
|
||||
Name: colstat.Name,
|
||||
DBName: colstat.DBName,
|
||||
Fields: make(map[string]interface{}),
|
||||
}
|
||||
newColData.Fields["type"] = "col_stat"
|
||||
for key, value := range colDataStats {
|
||||
val := colStatLine.FieldByName(value).Interface()
|
||||
newColData.Fields[key] = val
|
||||
}
|
||||
d.ColData = append(d.ColData, *newColData)
|
||||
}
|
||||
}
|
||||
|
||||
func (d *mongoDBData) addShardHostStats() {
|
||||
for host := range d.StatLine.ShardHostStatsLines {
|
||||
hostStat := d.StatLine.ShardHostStatsLines[host]
|
||||
hostStatLine := reflect.ValueOf(&hostStat).Elem()
|
||||
newDBData := &bbData{
|
||||
Name: host,
|
||||
Fields: make(map[string]interface{}),
|
||||
}
|
||||
newDBData.Fields["type"] = "shard_host_stat"
|
||||
for k, v := range shardHostStats {
|
||||
val := hostStatLine.FieldByName(v).Interface()
|
||||
newDBData.Fields[k] = val
|
||||
}
|
||||
d.ShardHostData = append(d.ShardHostData, *newDBData)
|
||||
}
|
||||
}
|
||||
|
||||
func (d *mongoDBData) addTopStats() {
|
||||
for i := range d.StatLine.TopStatLines {
|
||||
topStat := d.StatLine.TopStatLines[i]
|
||||
topStatLine := reflect.ValueOf(&topStat).Elem()
|
||||
newTopStatData := &bbData{
|
||||
Name: topStat.CollectionName,
|
||||
Fields: make(map[string]interface{}),
|
||||
}
|
||||
newTopStatData.Fields["type"] = "top_stat"
|
||||
for key, value := range topDataStats {
|
||||
val := topStatLine.FieldByName(value).Interface()
|
||||
newTopStatData.Fields[key] = val
|
||||
}
|
||||
d.TopStatsData = append(d.TopStatsData, *newTopStatData)
|
||||
}
|
||||
}
|
||||
|
||||
// addDefaultStats populates d.Fields (and a few d.Tags) from the current
// statLine: the always-present default counters, then conditional groups
// (replication, latency, oplog window, version) and finally the
// storage-engine-specific metrics.
func (d *mongoDBData) addDefaultStats() {
	statLine := reflect.ValueOf(d.StatLine).Elem()
	d.addStat(statLine, defaultStats)
	// Replica-set members report a node type; emit the repl metric group
	// and tag the measurement with the member's role.
	if d.StatLine.NodeType != "" {
		d.addStat(statLine, defaultReplStats)
		d.Tags["node_type"] = d.StatLine.NodeType
	}

	// Latency metrics are only emitted when read latency was observed;
	// presumably this guards against servers without opLatencies data —
	// TODO confirm against the statLine builder.
	if d.StatLine.ReadLatency > 0 {
		d.addStat(statLine, defaultLatencyStats)
	}

	if d.StatLine.ReplSetName != "" {
		d.Tags["rs_name"] = d.StatLine.ReplSetName
	}

	if d.StatLine.OplogStats != nil {
		d.add("repl_oplog_window_sec", d.StatLine.OplogStats.TimeDiff)
	}

	if d.StatLine.Version != "" {
		d.add("version", d.StatLine.Version)
	}

	// Unconditional metric groups.
	d.addStat(statLine, defaultAssertsStats)
	d.addStat(statLine, defaultClusterStats)
	d.addStat(statLine, defaultCommandsStats)
	d.addStat(statLine, defaultShardStats)
	d.addStat(statLine, defaultStorageStats)
	d.addStat(statLine, defaultTCMallocStats)

	// Storage-engine-specific metrics.
	if d.StatLine.StorageEngine == "mmapv1" || d.StatLine.StorageEngine == "rocksdb" {
		d.addStat(statLine, mmapStats)
	} else if d.StatLine.StorageEngine == "wiredTiger" {
		for key, value := range wiredTigerStats {
			val := statLine.FieldByName(value).Interface()
			// Cache ratios are fractions; convert to a percentage rounded
			// to one decimal place via format-then-parse.
			percentVal := fmt.Sprintf("%.1f", val.(float64)*100)
			//nolint:errcheck // guaranteed to be formatted properly because of the above
			floatVal, _ := strconv.ParseFloat(percentVal, 64)
			d.add(key, floatVal)
		}
		d.addStat(statLine, wiredTigerExtStats)
		d.addStat(statLine, wiredTigerConnectionStats)
		d.addStat(statLine, wiredTigerDataHandleStats)
		// WiredTiger also reports page faults, but outside the mmap group.
		d.add("page_faults", d.StatLine.FaultsCnt)
	}
}
|
||||
|
||||
func (d *mongoDBData) addStat(statLine reflect.Value, stats map[string]string) {
|
||||
for key, value := range stats {
|
||||
val := statLine.FieldByName(value).Interface()
|
||||
d.add(key, val)
|
||||
}
|
||||
}
|
||||
|
||||
// add records a single field value under key for the main "mongodb"
// measurement; the map is emitted and reset by flush.
func (d *mongoDBData) add(key string, val interface{}) {
	d.Fields[key] = val
}
|
||||
|
||||
// flush emits all accumulated metrics to acc — the main "mongodb"
// measurement first, then per-database, per-collection, per-shard-host
// and "top" statistics — and resets the main field map.
//
// NOTE(review): d.Tags is a single shared map mutated in place by each
// loop below, so tags set by an earlier loop (e.g. "db_name",
// "hostname") remain present on metrics emitted by later loops —
// confirm this is the intended behavior.
func (d *mongoDBData) flush(acc telegraf.Accumulator) {
	acc.AddFields(
		"mongodb",
		d.Fields,
		d.Tags,
		d.StatLine.Time,
	)
	d.Fields = make(map[string]interface{})

	for _, db := range d.DBData {
		d.Tags["db_name"] = db.Name
		acc.AddFields(
			"mongodb_db_stats",
			db.Fields,
			d.Tags,
			d.StatLine.Time,
		)
		// Resets only the loop copy, not the element stored in d.DBData.
		db.Fields = make(map[string]interface{})
	}
	for _, col := range d.ColData {
		d.Tags["collection"] = col.Name
		d.Tags["db_name"] = col.DBName
		acc.AddFields(
			"mongodb_col_stats",
			col.Fields,
			d.Tags,
			d.StatLine.Time,
		)
		// Resets only the loop copy, not the element stored in d.ColData.
		col.Fields = make(map[string]interface{})
	}
	for _, host := range d.ShardHostData {
		// Overwrites the server-level "hostname" tag with the shard host.
		d.Tags["hostname"] = host.Name
		acc.AddFields(
			"mongodb_shard_stats",
			host.Fields,
			d.Tags,
			d.StatLine.Time,
		)
		// Resets only the loop copy, not the stored element.
		host.Fields = make(map[string]interface{})
	}
	for _, col := range d.TopStatsData {
		d.Tags["collection"] = col.Name
		acc.AddFields(
			"mongodb_top_stats",
			col.Fields,
			d.Tags,
			d.StatLine.Time,
		)
		// Resets only the loop copy, not the stored element.
		col.Fields = make(map[string]interface{})
	}
}
|
543
plugins/inputs/mongodb/mongodb_data_test.go
Normal file
543
plugins/inputs/mongodb/mongodb_data_test.go
Normal file
|
@ -0,0 +1,543 @@
|
|||
package mongodb
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
)
|
||||
|
||||
// tags is the shared (initially empty) tag set used by most tests below.
var tags = make(map[string]string)
|
||||
// TestAddNonReplStats verifies that a zero-valued statLine with no storage
// engine still yields every metric listed in defaultStats, each as a float
// or int64 field on the "mongodb" measurement.
func TestAddNonReplStats(t *testing.T) {
	d := newMongodbData(
		&statLine{
			StorageEngine:       "",
			Time:                time.Now(),
			UptimeNanos:         0,
			Insert:              0,
			Query:               0,
			Update:              0,
			UpdateCnt:           0,
			Delete:              0,
			GetMore:             0,
			Command:             0,
			Flushes:             0,
			FlushesCnt:          0,
			Virtual:             0,
			Resident:            0,
			QueuedReaders:       0,
			QueuedWriters:       0,
			ActiveReaders:       0,
			ActiveWriters:       0,
			AvailableReaders:    0,
			AvailableWriters:    0,
			TotalTicketsReaders: 0,
			TotalTicketsWriters: 0,
			NetIn:               0,
			NetOut:              0,
			NumConnections:      0,
			Passes:              0,
			DeletedDocuments:    0,
			TimedOutC:           0,
			NoTimeoutC:          0,
			PinnedC:             0,
			TotalC:              0,
			DeletedD:            0,
			InsertedD:           0,
			ReturnedD:           0,
			UpdatedD:            0,
			CurrentC:            0,
			AvailableC:          0,
			TotalCreatedC:       0,
			ScanAndOrderOp:      0,
			WriteConflictsOp:    0,
			TotalKeysScanned:    0,
			TotalObjectsScanned: 0,
		},
		tags,
	)
	var acc testutil.Accumulator

	d.addDefaultStats()
	d.flush(&acc)

	for key := range defaultStats {
		require.True(t, acc.HasFloatField("mongodb", key) || acc.HasInt64Field("mongodb", key), key)
	}
}
|
||||
|
||||
// TestAddReplStats verifies that an mmapv1 statLine yields every metric in
// mmapStats as an int64 field.
// NOTE(review): despite the name, this test exercises the mmap storage
// metrics, not the replication metrics — consider renaming.
func TestAddReplStats(t *testing.T) {
	d := newMongodbData(
		&statLine{
			StorageEngine: "mmapv1",
			Mapped:        0,
			NonMapped:     0,
			Faults:        0,
		},
		tags,
	)

	var acc testutil.Accumulator

	d.addDefaultStats()
	d.flush(&acc)

	for key := range mmapStats {
		require.True(t, acc.HasInt64Field("mongodb", key), key)
	}
}
|
||||
|
||||
// TestAddWiredTigerStats verifies that a wiredTiger statLine yields the
// cache-percentage metrics (floats), the extended cache metrics, and a
// "page_faults" int64 field.
func TestAddWiredTigerStats(t *testing.T) {
	d := newMongodbData(
		&statLine{
			StorageEngine:              "wiredTiger",
			CacheDirtyPercent:          0,
			CacheUsedPercent:           0,
			TrackedDirtyBytes:          0,
			CurrentCachedBytes:         0,
			MaxBytesConfigured:         0,
			AppThreadsPageReadCount:    0,
			AppThreadsPageReadTime:     0,
			AppThreadsPageWriteCount:   0,
			BytesWrittenFrom:           0,
			BytesReadInto:              0,
			PagesEvictedByAppThread:    0,
			PagesQueuedForEviction:     0,
			ServerEvictingPages:        0,
			WorkerThreadEvictingPages:  0,
			PagesReadIntoCache:         0,
			PagesRequestedFromCache:    0,
			PagesWrittenFromCache:      1247,
			InternalPagesEvicted:       0,
			ModifiedPagesEvicted:       0,
			UnmodifiedPagesEvicted:     0,
			FilesCurrentlyOpen:         0,
			DataHandlesCurrentlyActive: 0,
			FaultsCnt:                  204,
		},
		tags,
	)

	var acc testutil.Accumulator

	d.addDefaultStats()
	d.flush(&acc)

	for key := range wiredTigerStats {
		require.True(t, acc.HasFloatField("mongodb", key), key)
	}

	for key := range wiredTigerExtStats {
		require.True(t, acc.HasFloatField("mongodb", key) || acc.HasInt64Field("mongodb", key), key)
	}

	require.True(t, acc.HasInt64Field("mongodb", "page_faults"))
}
|
||||
|
||||
// TestAddShardStats verifies that the aggregate shard connection-pool
// metrics from defaultShardStats are emitted as int64 fields.
func TestAddShardStats(t *testing.T) {
	d := newMongodbData(
		&statLine{
			TotalInUse:      0,
			TotalAvailable:  0,
			TotalCreated:    0,
			TotalRefreshing: 0,
		},
		tags,
	)

	var acc testutil.Accumulator

	d.addDefaultStats()
	d.flush(&acc)

	for key := range defaultShardStats {
		require.True(t, acc.HasInt64Field("mongodb", key))
	}
}
|
||||
|
||||
// TestAddLatencyStats verifies that nonzero opLatencies counters produce
// every metric in defaultLatencyStats as an int64 field (the group is
// gated on ReadLatency > 0).
func TestAddLatencyStats(t *testing.T) {
	d := newMongodbData(
		&statLine{
			CommandOpsCnt:  73,
			CommandLatency: 364,
			ReadOpsCnt:     113,
			ReadLatency:    201,
			WriteOpsCnt:    7,
			WriteLatency:   55,
		},
		tags,
	)

	var acc testutil.Accumulator

	d.addDefaultStats()
	d.flush(&acc)

	for key := range defaultLatencyStats {
		require.True(t, acc.HasInt64Field("mongodb", key))
	}
}
|
||||
|
||||
// TestAddAssertsStats verifies that the assertion counters from
// defaultAssertsStats are emitted as int64 fields.
func TestAddAssertsStats(t *testing.T) {
	d := newMongodbData(
		&statLine{
			Regular:   3,
			Warning:   9,
			Msg:       2,
			User:      34,
			Rollovers: 0,
		},
		tags,
	)

	var acc testutil.Accumulator

	d.addDefaultStats()
	d.flush(&acc)

	for key := range defaultAssertsStats {
		require.True(t, acc.HasInt64Field("mongodb", key))
	}
}
|
||||
|
||||
// TestAddCommandsStats verifies that the per-command total/failed counters
// from defaultCommandsStats are emitted as int64 fields.
func TestAddCommandsStats(t *testing.T) {
	d := newMongodbData(
		&statLine{
			AggregateCommandTotal:      12,
			AggregateCommandFailed:     2,
			CountCommandTotal:          18,
			CountCommandFailed:         5,
			DeleteCommandTotal:         73,
			DeleteCommandFailed:        364,
			DistinctCommandTotal:       87,
			DistinctCommandFailed:      19,
			FindCommandTotal:           113,
			FindCommandFailed:          201,
			FindAndModifyCommandTotal:  7,
			FindAndModifyCommandFailed: 55,
			GetMoreCommandTotal:        4,
			GetMoreCommandFailed:       55,
			InsertCommandTotal:         34,
			InsertCommandFailed:        65,
			UpdateCommandTotal:         23,
			UpdateCommandFailed:        6,
		},
		tags,
	)

	var acc testutil.Accumulator

	d.addDefaultStats()
	d.flush(&acc)

	for key := range defaultCommandsStats {
		require.True(t, acc.HasInt64Field("mongodb", key))
	}
}
|
||||
|
||||
// TestAddTCMallocStats verifies that the tcmalloc allocator counters from
// defaultTCMallocStats are emitted as int64 fields.
func TestAddTCMallocStats(t *testing.T) {
	d := newMongodbData(
		&statLine{
			TCMallocCurrentAllocatedBytes:        5877253096,
			TCMallocHeapSize:                     8067108864,
			TCMallocPageheapFreeBytes:            1054994432,
			TCMallocPageheapUnmappedBytes:        677859328,
			TCMallocMaxTotalThreadCacheBytes:     1073741824,
			TCMallocCurrentTotalThreadCacheBytes: 80405312,
			TCMallocTotalFreeBytes:               457002008,
			TCMallocCentralCacheFreeBytes:        375131800,
			TCMallocTransferCacheFreeBytes:       1464896,
			TCMallocThreadCacheFreeBytes:         80405312,
			TCMallocPageheapComittedBytes:        7389249536,
			TCMallocPageheapScavengeCount:        396394,
			TCMallocPageheapCommitCount:          641765,
			TCMallocPageheapTotalCommitBytes:     102248751104,
			TCMallocPageheapDecommitCount:        396394,
			TCMallocPageheapTotalDecommitBytes:   94859501568,
			TCMallocPageheapReserveCount:         6179,
			TCMallocPageheapTotalReserveBytes:    8067108864,
			TCMallocSpinLockTotalDelayNanos:      2344453860,
		},
		tags,
	)

	var acc testutil.Accumulator

	d.addDefaultStats()
	d.flush(&acc)

	for key := range defaultTCMallocStats {
		require.True(t, acc.HasInt64Field("mongodb", key))
	}
}
|
||||
|
||||
// TestAddStorageStats verifies that the storage freelist counters from
// defaultStorageStats are emitted as int64 fields.
func TestAddStorageStats(t *testing.T) {
	d := newMongodbData(
		&statLine{
			StorageFreelistSearchBucketExhausted: 0,
			StorageFreelistSearchRequests:        0,
			StorageFreelistSearchScanned:         0,
		},
		tags,
	)

	var acc testutil.Accumulator

	d.addDefaultStats()
	d.flush(&acc)

	for key := range defaultStorageStats {
		require.True(t, acc.HasInt64Field("mongodb", key))
	}
}
|
||||
|
||||
// TestAddShardHostStats verifies that each shard host produces a
// "mongodb_shard_stats" measurement carrying the shardHostStats int64
// fields and a "hostname" tag, and that every expected host is covered.
func TestAddShardHostStats(t *testing.T) {
	expectedHosts := []string{"hostA", "hostB"}
	hostStatLines := map[string]shardHostStatLine{}
	for _, host := range expectedHosts {
		hostStatLines[host] = shardHostStatLine{
			InUse:      0,
			Available:  0,
			Created:    0,
			Refreshing: 0,
		}
	}

	d := newMongodbData(
		&statLine{
			ShardHostStatsLines: hostStatLines,
		},
		map[string]string{}, // Use empty tags, so we don't break existing tests
	)

	var acc testutil.Accumulator
	d.addShardHostStats()
	d.flush(&acc)

	hostsFound := make([]string, 0, len(hostStatLines))
	for host := range hostStatLines {
		for key := range shardHostStats {
			require.True(t, acc.HasInt64Field("mongodb_shard_stats", key))
		}

		require.True(t, acc.HasTag("mongodb_shard_stats", "hostname"))
		hostsFound = append(hostsFound, host)
	}
	sort.Strings(hostsFound)
	sort.Strings(expectedHosts)
	require.Equal(t, expectedHosts, hostsFound)
}
|
||||
|
||||
// TestStateTag verifies that a replica-set primary produces the
// "node_type" and "rs_name" tags and the exact full set of default +
// replication fields (golden map below) on the "mongodb" measurement.
func TestStateTag(t *testing.T) {
	d := newMongodbData(
		&statLine{
			StorageEngine: "",
			Time:          time.Now(),
			Insert:        0,
			Query:         0,
			NodeType:      "PRI",
			NodeState:     "PRIMARY",
			ReplSetName:   "rs1",
			Version:       "3.6.17",
		},
		tags,
	)

	stateTags := make(map[string]string)
	stateTags["node_type"] = "PRI"
	stateTags["rs_name"] = "rs1"

	var acc testutil.Accumulator

	d.addDefaultStats()
	d.flush(&acc)
	// Golden field set: everything addDefaultStats emits for this input.
	fields := map[string]interface{}{
		"active_reads":                             int64(0),
		"active_writes":                            int64(0),
		"aggregate_command_failed":                 int64(0),
		"aggregate_command_total":                  int64(0),
		"assert_msg":                               int64(0),
		"assert_regular":                           int64(0),
		"assert_rollovers":                         int64(0),
		"assert_user":                              int64(0),
		"assert_warning":                           int64(0),
		"available_reads":                          int64(0),
		"available_writes":                         int64(0),
		"commands":                                 int64(0),
		"commands_per_sec":                         int64(0),
		"connections_available":                    int64(0),
		"connections_current":                      int64(0),
		"connections_total_created":                int64(0),
		"count_command_failed":                     int64(0),
		"count_command_total":                      int64(0),
		"cursor_no_timeout":                        int64(0),
		"cursor_no_timeout_count":                  int64(0),
		"cursor_pinned":                            int64(0),
		"cursor_pinned_count":                      int64(0),
		"cursor_timed_out":                         int64(0),
		"cursor_timed_out_count":                   int64(0),
		"cursor_total":                             int64(0),
		"cursor_total_count":                       int64(0),
		"delete_command_failed":                    int64(0),
		"delete_command_total":                     int64(0),
		"deletes":                                  int64(0),
		"deletes_per_sec":                          int64(0),
		"distinct_command_failed":                  int64(0),
		"distinct_command_total":                   int64(0),
		"document_deleted":                         int64(0),
		"document_inserted":                        int64(0),
		"document_returned":                        int64(0),
		"document_updated":                         int64(0),
		"find_and_modify_command_failed":           int64(0),
		"find_and_modify_command_total":            int64(0),
		"find_command_failed":                      int64(0),
		"find_command_total":                       int64(0),
		"flushes":                                  int64(0),
		"flushes_per_sec":                          int64(0),
		"flushes_total_time_ns":                    int64(0),
		"get_more_command_failed":                  int64(0),
		"get_more_command_total":                   int64(0),
		"getmores":                                 int64(0),
		"getmores_per_sec":                         int64(0),
		"insert_command_failed":                    int64(0),
		"insert_command_total":                     int64(0),
		"inserts":                                  int64(0),
		"inserts_per_sec":                          int64(0),
		"jumbo_chunks":                             int64(0),
		"member_status":                            "PRI",
		"net_in_bytes":                             int64(0),
		"net_in_bytes_count":                       int64(0),
		"net_out_bytes":                            int64(0),
		"net_out_bytes_count":                      int64(0),
		"open_connections":                         int64(0),
		"operation_scan_and_order":                 int64(0),
		"operation_write_conflicts":                int64(0),
		"queries":                                  int64(0),
		"queries_per_sec":                          int64(0),
		"queued_reads":                             int64(0),
		"queued_writes":                            int64(0),
		"repl_apply_batches_num":                   int64(0),
		"repl_apply_batches_total_millis":          int64(0),
		"repl_apply_ops":                           int64(0),
		"repl_buffer_count":                        int64(0),
		"repl_buffer_size_bytes":                   int64(0),
		"repl_commands":                            int64(0),
		"repl_commands_per_sec":                    int64(0),
		"repl_deletes":                             int64(0),
		"repl_deletes_per_sec":                     int64(0),
		"repl_executor_pool_in_progress_count":     int64(0),
		"repl_executor_queues_network_in_progress": int64(0),
		"repl_executor_queues_sleepers":            int64(0),
		"repl_executor_unsignaled_events":          int64(0),
		"repl_getmores":                            int64(0),
		"repl_getmores_per_sec":                    int64(0),
		"repl_inserts":                             int64(0),
		"repl_inserts_per_sec":                     int64(0),
		"repl_lag":                                 int64(0),
		"repl_network_bytes":                       int64(0),
		"repl_network_getmores_num":                int64(0),
		"repl_network_getmores_total_millis":       int64(0),
		"repl_network_ops":                         int64(0),
		"repl_queries":                             int64(0),
		"repl_queries_per_sec":                     int64(0),
		"repl_updates":                             int64(0),
		"repl_updates_per_sec":                     int64(0),
		"repl_state":                               int64(0),
		"repl_member_health":                       int64(0),
		"repl_health_avg":                          float64(0),
		"resident_megabytes":                       int64(0),
		"state":                                    "PRIMARY",
		"storage_freelist_search_bucket_exhausted": int64(0),
		"storage_freelist_search_requests":         int64(0),
		"storage_freelist_search_scanned":          int64(0),
		"tcmalloc_central_cache_free_bytes":        int64(0),
		"tcmalloc_current_allocated_bytes":         int64(0),
		"tcmalloc_current_total_thread_cache_bytes": int64(0),
		"tcmalloc_heap_size":                     int64(0),
		"tcmalloc_max_total_thread_cache_bytes":  int64(0),
		"tcmalloc_pageheap_commit_count":         int64(0),
		"tcmalloc_pageheap_committed_bytes":      int64(0),
		"tcmalloc_pageheap_decommit_count":       int64(0),
		"tcmalloc_pageheap_free_bytes":           int64(0),
		"tcmalloc_pageheap_reserve_count":        int64(0),
		"tcmalloc_pageheap_scavenge_count":       int64(0),
		"tcmalloc_pageheap_total_commit_bytes":   int64(0),
		"tcmalloc_pageheap_total_decommit_bytes": int64(0),
		"tcmalloc_pageheap_total_reserve_bytes":  int64(0),
		"tcmalloc_pageheap_unmapped_bytes":       int64(0),
		"tcmalloc_spinlock_total_delay_ns":       int64(0),
		"tcmalloc_thread_cache_free_bytes":       int64(0),
		"tcmalloc_total_free_bytes":              int64(0),
		"tcmalloc_transfer_cache_free_bytes":     int64(0),
		"total_available":                        int64(0),
		"total_created":                          int64(0),
		"total_docs_scanned":                     int64(0),
		"total_in_use":                           int64(0),
		"total_keys_scanned":                     int64(0),
		"total_refreshing":                       int64(0),
		"total_tickets_reads":                    int64(0),
		"total_tickets_writes":                   int64(0),
		"ttl_deletes":                            int64(0),
		"ttl_deletes_per_sec":                    int64(0),
		"ttl_passes":                             int64(0),
		"ttl_passes_per_sec":                     int64(0),
		"update_command_failed":                  int64(0),
		"update_command_total":                   int64(0),
		"updates":                                int64(0),
		"updates_per_sec":                        int64(0),
		"uptime_ns":                              int64(0),
		"version":                                "3.6.17",
		"vsize_megabytes":                        int64(0),
	}
	acc.AssertContainsTaggedFields(t, "mongodb", fields, stateTags)
}
|
||||
|
||||
// TestAddTopStats verifies that each collection's "top" line produces a
// "mongodb_top_stats" measurement carrying every topDataStats int64 field.
func TestAddTopStats(t *testing.T) {
	collections := []string{"collectionOne", "collectionTwo"}
	topStatLines := make([]topStatLine, 0, len(collections))
	for _, collection := range collections {
		topStatLine := topStatLine{
			CollectionName: collection,
			TotalTime:      0,
			TotalCount:     0,
			ReadLockTime:   0,
			ReadLockCount:  0,
			WriteLockTime:  0,
			WriteLockCount: 0,
			QueriesTime:    0,
			QueriesCount:   0,
			GetMoreTime:    0,
			GetMoreCount:   0,
			InsertTime:     0,
			InsertCount:    0,
			UpdateTime:     0,
			UpdateCount:    0,
			RemoveTime:     0,
			RemoveCount:    0,
			CommandsTime:   0,
			CommandsCount:  0,
		}
		topStatLines = append(topStatLines, topStatLine)
	}

	d := newMongodbData(
		&statLine{
			TopStatLines: topStatLines,
		},
		tags,
	)

	var acc testutil.Accumulator
	d.addTopStats()
	d.flush(&acc)

	for range topStatLines {
		for key := range topDataStats {
			require.True(t, acc.HasInt64Field("mongodb_top_stats", key))
		}
	}
}
|
379
plugins/inputs/mongodb/mongodb_server.go
Normal file
379
plugins/inputs/mongodb/mongodb_server.go
Normal file
|
@ -0,0 +1,379 @@
|
|||
package mongodb
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"go.mongodb.org/mongo-driver/bson"
|
||||
"go.mongodb.org/mongo-driver/bson/primitive"
|
||||
"go.mongodb.org/mongo-driver/mongo"
|
||||
"go.mongodb.org/mongo-driver/mongo/options"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
)
|
||||
|
||||
// server wraps a single MongoDB client connection together with the state
// this plugin keeps between Gather calls.
type server struct {
	client   *mongo.Client
	hostname string
	// lastResult holds the previous collection's status; presumably used
	// to derive per-second rates on the next run — confirm in the caller.
	lastResult *mongoStatus

	log telegraf.Logger
}

// oplogEntry captures just the "ts" timestamp of an oplog document.
type oplogEntry struct {
	Timestamp primitive.Timestamp `bson:"ts"`
}
|
||||
|
||||
func isAuthorization(err error) bool {
|
||||
return strings.Contains(err.Error(), "not authorized")
|
||||
}
|
||||
|
||||
func (s *server) getDefaultTags() map[string]string {
|
||||
tags := make(map[string]string)
|
||||
tags["hostname"] = s.hostname
|
||||
return tags
|
||||
}
|
||||
|
||||
func (s *server) ping() error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
|
||||
defer cancel()
|
||||
|
||||
return s.client.Ping(ctx, nil)
|
||||
}
|
||||
|
||||
func (s *server) authLog(err error) {
|
||||
if isAuthorization(err) {
|
||||
s.log.Debug(err.Error())
|
||||
} else {
|
||||
s.log.Error(err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func (s *server) runCommand(database string, cmd, result interface{}) error {
|
||||
r := s.client.Database(database).RunCommand(context.Background(), cmd)
|
||||
if r.Err() != nil {
|
||||
return r.Err()
|
||||
}
|
||||
return r.Decode(result)
|
||||
}
|
||||
|
||||
func (s *server) gatherServerStatus() (*serverStatus, error) {
|
||||
serverStatus := &serverStatus{}
|
||||
err := s.runCommand("admin", bson.D{
|
||||
{
|
||||
Key: "serverStatus",
|
||||
Value: 1,
|
||||
},
|
||||
{
|
||||
Key: "recordStats",
|
||||
Value: 0,
|
||||
},
|
||||
}, serverStatus)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return serverStatus, nil
|
||||
}
|
||||
|
||||
func (s *server) gatherReplSetStatus() (*replSetStatus, error) {
|
||||
replSetStatus := &replSetStatus{}
|
||||
err := s.runCommand("admin", bson.D{
|
||||
{
|
||||
Key: "replSetGetStatus",
|
||||
Value: 1,
|
||||
},
|
||||
}, replSetStatus)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return replSetStatus, nil
|
||||
}
|
||||
|
||||
// gatherTopStatData runs the "top" admin command and converts its loosely
// typed per-collection usage totals into the plugin's topStats structure.
func (s *server) gatherTopStatData() (*topStats, error) {
	var dest map[string]interface{}
	err := s.runCommand("admin", bson.D{
		{
			Key:   "top",
			Value: 1,
		},
	}, &dest)
	if err != nil {
		return nil, fmt.Errorf("failed running admin cmd: %w", err)
	}

	totals, ok := dest["totals"].(map[string]interface{})
	if !ok {
		return nil, errors.New("collection totals not found or not a map")
	}
	// "note" is informational text returned by the server, not a collection.
	delete(totals, "note")

	// Round-trip through BSON to convert the generic map into the strongly
	// typed per-collection statistics.
	recorded, err := bson.Marshal(totals)
	if err != nil {
		return nil, errors.New("unable to marshal totals")
	}

	topInfo := make(map[string]topStatCollection)
	if err := bson.Unmarshal(recorded, &topInfo); err != nil {
		return nil, fmt.Errorf("failed unmarshalling records: %w", err)
	}

	return &topStats{Totals: topInfo}, nil
}
|
||||
|
||||
// gatherClusterStatus counts jumbo chunks in config.chunks.
// NOTE: the jumbo-chunk query triggers a collection scan, which may have a
// performance impact on large sharded clusters (see sample.conf).
func (s *server) gatherClusterStatus() (*clusterStatus, error) {
	chunkCount, err := s.client.Database("config").Collection("chunks").CountDocuments(context.Background(), bson.M{"jumbo": true})
	if err != nil {
		return nil, err
	}

	return &clusterStatus{
		JumboChunksCount: chunkCount,
	}, nil
}
|
||||
|
||||
// poolStatsCommand returns the name of the connection-pool statistics command
// appropriate for the given server version string (e.g. "6.0.2"):
// "connPoolStats" for MongoDB 5.0 and newer, and the legacy
// "shardConnPoolStats" (removed in 5.0) for older servers.
//
// An error is returned when the major version component cannot be parsed.
func poolStatsCommand(version string) (string, error) {
	// Parse the entire major component rather than only the first character:
	// the previous single-byte approach panicked on an empty version string
	// and would misparse a two-digit major such as "10.0.0" as 1.
	majorPart, _, _ := strings.Cut(version, ".")
	major, err := strconv.ParseInt(majorPart, 10, 64)
	if err != nil {
		return "", err
	}

	if major >= 5 {
		return "connPoolStats", nil
	}
	return "shardConnPoolStats", nil
}
|
||||
|
||||
func (s *server) gatherShardConnPoolStats(version string) (*shardStats, error) {
|
||||
command, err := poolStatsCommand(version)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
shardStats := &shardStats{}
|
||||
err = s.runCommand("admin", bson.D{
|
||||
{
|
||||
Key: command,
|
||||
Value: 1,
|
||||
},
|
||||
}, &shardStats)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return shardStats, nil
|
||||
}
|
||||
|
||||
func (s *server) gatherDBStats(name string) (*db, error) {
|
||||
stats := &dbStatsData{}
|
||||
err := s.runCommand(name, bson.D{
|
||||
{
|
||||
Key: "dbStats",
|
||||
Value: 1,
|
||||
},
|
||||
}, stats)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &db{
|
||||
Name: name,
|
||||
DBStatsData: stats,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// getOplogReplLag reports the time window covered by the given oplog
// collection: the difference in seconds between the newest and oldest oplog
// entry timestamps.
func (s *server) getOplogReplLag(collection string) (*oplogStats, error) {
	query := bson.M{"ts": bson.M{"$exists": true}}

	// Oldest entry: natural order ascending.
	var first oplogEntry
	firstResult := s.client.Database("local").Collection(collection).FindOne(context.Background(), query, options.FindOne().SetSort(bson.M{"$natural": 1}))
	if firstResult.Err() != nil {
		return nil, firstResult.Err()
	}
	if err := firstResult.Decode(&first); err != nil {
		return nil, err
	}

	// Newest entry: natural order descending.
	var last oplogEntry
	lastResult := s.client.Database("local").Collection(collection).FindOne(context.Background(), query, options.FindOne().SetSort(bson.M{"$natural": -1}))
	if lastResult.Err() != nil {
		return nil, lastResult.Err()
	}
	if err := lastResult.Decode(&last); err != nil {
		return nil, err
	}

	// Only whole-second precision of the BSON timestamps is used.
	firstTime := time.Unix(int64(first.Timestamp.T), 0)
	lastTime := time.Unix(int64(last.Timestamp.T), 0)
	stats := &oplogStats{
		TimeDiff: int64(lastTime.Sub(firstTime).Seconds()),
	}
	return stats, nil
}
|
||||
|
||||
// The "oplog.rs" collection is stored on all replica set members.
|
||||
//
|
||||
// The "oplog.$main" collection is created on the master node of a
|
||||
// master-slave replicated deployment. As of MongoDB 3.2, master-slave
|
||||
// replication has been deprecated.
|
||||
func (s *server) gatherOplogStats() (*oplogStats, error) {
|
||||
stats, err := s.getOplogReplLag("oplog.rs")
|
||||
if err == nil {
|
||||
return stats, nil
|
||||
}
|
||||
|
||||
return s.getOplogReplLag("oplog.$main")
|
||||
}
|
||||
|
||||
// gatherCollectionStats runs "collStats" for every collection of the selected
// databases (all databases when colStatsDBs is empty) and aggregates the
// results. Per-collection authorization failures are logged and skipped
// instead of aborting the whole gather.
func (s *server) gatherCollectionStats(colStatsDBs []string) (*colStats, error) {
	names, err := s.client.ListDatabaseNames(context.Background(), bson.D{})
	if err != nil {
		return nil, err
	}

	results := &colStats{}
	for _, dbName := range names {
		if slices.Contains(colStatsDBs, dbName) || len(colStatsDBs) == 0 {
			// skip views as they fail on collStats below
			filter := bson.M{"type": bson.M{"$in": bson.A{"collection", "timeseries"}}}

			var colls []string
			colls, err = s.client.Database(dbName).ListCollectionNames(context.Background(), filter)
			if err != nil {
				s.log.Errorf("Error getting collection names: %s", err.Error())
				continue
			}
			for _, colName := range colls {
				colStatLine := &colStatsData{}
				err = s.runCommand(dbName, bson.D{
					{
						Key:   "collStats",
						Value: colName,
					},
				}, colStatLine)
				if err != nil {
					// authLog demotes "not authorized" errors to debug level.
					s.authLog(fmt.Errorf("error getting col stats from %q: %w", colName, err))
					continue
				}
				collection := &collection{
					Name:         colName,
					DBName:       dbName,
					ColStatsData: colStatLine,
				}
				results.Collections = append(results.Collections, *collection)
			}
		}
	}
	return results, nil
}
|
||||
|
||||
// gatherData collects one full statistics sample from the server and, when a
// previous sample exists, emits the diffed metrics to acc. The boolean flags
// toggle the optional stat groups; colStatsDBs limits collection stats to the
// named databases (empty means all). Only serverStatus, collection stats, db
// listing, and top-stat failures abort the gather; the other groups are soft.
func (s *server) gatherData(acc telegraf.Accumulator, gatherClusterStatus, gatherDBStats, gatherColStats, gatherTopStat bool, colStatsDBs []string) error {
	serverStatus, err := s.gatherServerStatus()
	if err != nil {
		return err
	}

	// Get replica set status, an error indicates that the server is not a
	// member of a replica set.
	replSetStatus, err := s.gatherReplSetStatus()
	if err != nil {
		s.log.Debugf("Unable to gather replica set status: %s", err.Error())
	}

	// Gather the oplog if we are a member of a replica set. Non-replica set
	// members do not have the oplog collections.
	var oplogStats *oplogStats
	if replSetStatus != nil {
		oplogStats, err = s.gatherOplogStats()
		if err != nil {
			s.authLog(fmt.Errorf("unable to get oplog stats: %w", err))
		}
	}

	var clusterStatus *clusterStatus
	if gatherClusterStatus {
		status, err := s.gatherClusterStatus()
		if err != nil {
			s.log.Debugf("Unable to gather cluster status: %s", err.Error())
		}
		clusterStatus = status
	}

	// Pool stats may fail due to missing permissions; treated as soft.
	shardStats, err := s.gatherShardConnPoolStats(serverStatus.Version)
	if err != nil {
		s.authLog(fmt.Errorf("unable to gather shard connection pool stats: %w", err))
	}

	var collectionStats *colStats
	if gatherColStats {
		stats, err := s.gatherCollectionStats(colStatsDBs)
		if err != nil {
			return err
		}
		collectionStats = stats
	}

	dbStats := &dbStats{}
	if gatherDBStats {
		names, err := s.client.ListDatabaseNames(context.Background(), bson.D{})
		if err != nil {
			return err
		}

		for _, name := range names {
			db, err := s.gatherDBStats(name)
			if err != nil {
				// Skip databases we cannot stat instead of aborting.
				s.log.Errorf("Error getting db stats from %q: %v", name, err)
				continue
			}
			dbStats.DBs = append(dbStats.DBs, *db)
		}
	}

	topStatData := &topStats{}
	if gatherTopStat {
		topStats, err := s.gatherTopStatData()
		if err != nil {
			s.log.Debugf("Unable to gather top stat data: %s", err.Error())
			return err
		}
		topStatData = topStats
	}

	result := &mongoStatus{
		ServerStatus:  serverStatus,
		ReplSetStatus: replSetStatus,
		ClusterStatus: clusterStatus,
		DBStats:       dbStats,
		ColStats:      collectionStats,
		ShardStats:    shardStats,
		OplogStats:    oplogStats,
		TopStats:      topStatData,
	}

	result.SampleTime = time.Now()
	// Metrics are rates over the sampling interval, so two samples are needed:
	// the first call only seeds lastResult and emits nothing.
	if s.lastResult != nil {
		duration := result.SampleTime.Sub(s.lastResult.SampleTime)
		durationInSeconds := int64(duration.Seconds())
		if durationInSeconds == 0 {
			// Guard against division by zero for sub-second intervals.
			durationInSeconds = 1
		}
		data := newMongodbData(
			newStatLine(*s.lastResult, *result, s.hostname, durationInSeconds),
			s.getDefaultTags(),
		)
		data.addDefaultStats()
		data.addDBStats()
		data.addColStats()
		data.addShardHostStats()
		data.addTopStats()
		data.flush(acc)
	}

	s.lastResult = result
	return nil
}
|
189
plugins/inputs/mongodb/mongodb_server_test.go
Normal file
189
plugins/inputs/mongodb/mongodb_server_test.go
Normal file
|
@ -0,0 +1,189 @@
|
|||
package mongodb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/go-connections/nat"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/testcontainers/testcontainers-go/wait"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
)
|
||||
|
||||
// servicePort is the port exposed by the MongoDB test container.
var servicePort = "27017"

// unreachableMongoEndpoint is expected to be unreachable in the test environment.
var unreachableMongoEndpoint = "mongodb://user:pass@127.0.0.1:27017/nop"
|
||||
|
||||
func createTestServer(t *testing.T) *testutil.Container {
|
||||
container := testutil.Container{
|
||||
Image: "mongo",
|
||||
ExposedPorts: []string{servicePort},
|
||||
WaitingFor: wait.ForAll(
|
||||
wait.NewHTTPStrategy("/").WithPort(nat.Port(servicePort)),
|
||||
wait.ForLog("Waiting for connections"),
|
||||
),
|
||||
}
|
||||
err := container.Start()
|
||||
require.NoError(t, err, "failed to start container")
|
||||
|
||||
return &container
|
||||
}
|
||||
|
||||
func TestGetDefaultTagsIntegration(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping integration test in short mode")
|
||||
}
|
||||
|
||||
container := createTestServer(t)
|
||||
defer container.Terminate()
|
||||
|
||||
m := &MongoDB{
|
||||
Log: testutil.Logger{},
|
||||
Servers: []string{
|
||||
fmt.Sprintf("mongodb://%s:%s", container.Address, container.Ports[servicePort]),
|
||||
},
|
||||
}
|
||||
err := m.Init()
|
||||
require.NoError(t, err)
|
||||
var acc testutil.Accumulator
|
||||
err = m.Start(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
server := m.clients[0]
|
||||
|
||||
var tagTests = []struct {
|
||||
in string
|
||||
out string
|
||||
}{
|
||||
{"hostname", server.hostname},
|
||||
}
|
||||
defaultTags := server.getDefaultTags()
|
||||
for _, tt := range tagTests {
|
||||
if defaultTags[tt.in] != tt.out {
|
||||
t.Errorf("expected %q, got %q", tt.out, defaultTags[tt.in])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddDefaultStatsIntegration(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping integration test in short mode")
|
||||
}
|
||||
|
||||
container := createTestServer(t)
|
||||
defer container.Terminate()
|
||||
|
||||
m := &MongoDB{
|
||||
Log: testutil.Logger{},
|
||||
Servers: []string{
|
||||
fmt.Sprintf("mongodb://%s:%s", container.Address, container.Ports[servicePort]),
|
||||
},
|
||||
}
|
||||
err := m.Init()
|
||||
require.NoError(t, err)
|
||||
var acc testutil.Accumulator
|
||||
err = m.Start(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
server := m.clients[0]
|
||||
|
||||
err = server.gatherData(&acc, false, true, true, true, []string{"local"})
|
||||
require.NoError(t, err)
|
||||
|
||||
// need to call this twice so it can perform the diff
|
||||
err = server.gatherData(&acc, false, true, true, true, []string{"local"})
|
||||
require.NoError(t, err)
|
||||
|
||||
for key := range defaultStats {
|
||||
require.True(t, acc.HasInt64Field("mongodb", key))
|
||||
}
|
||||
}
|
||||
|
||||
// Verify that when set to skip, telegraf will init, start, and collect while
|
||||
// ignoring connection errors.
|
||||
func TestSkipBehaviorIntegration(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping integration test in short mode")
|
||||
}
|
||||
|
||||
m := &MongoDB{
|
||||
Log: &testutil.CaptureLogger{},
|
||||
Servers: []string{unreachableMongoEndpoint},
|
||||
}
|
||||
|
||||
m.DisconnectedServersBehavior = "skip"
|
||||
err := m.Init()
|
||||
require.NoError(t, err)
|
||||
var acc testutil.Accumulator
|
||||
err = m.Start(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = m.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// Verify that when set to error, telegraf will error out on start as expected
|
||||
func TestErrorBehaviorIntegration(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping integration test in short mode")
|
||||
}
|
||||
|
||||
m := &MongoDB{
|
||||
Log: &testutil.CaptureLogger{},
|
||||
Servers: []string{unreachableMongoEndpoint},
|
||||
DisconnectedServersBehavior: "error",
|
||||
}
|
||||
|
||||
err := m.Init()
|
||||
require.NoError(t, err)
|
||||
var acc testutil.Accumulator
|
||||
err = m.Start(&acc)
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
func TestPoolStatsVersionCompatibility(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
version string
|
||||
expectedCommand string
|
||||
err bool
|
||||
}{
|
||||
{
|
||||
name: "mongodb v3",
|
||||
version: "3.0.0",
|
||||
expectedCommand: "shardConnPoolStats",
|
||||
},
|
||||
{
|
||||
name: "mongodb v4",
|
||||
version: "4.0.0",
|
||||
expectedCommand: "shardConnPoolStats",
|
||||
},
|
||||
{
|
||||
name: "mongodb v5",
|
||||
version: "5.0.0",
|
||||
expectedCommand: "connPoolStats",
|
||||
},
|
||||
{
|
||||
name: "mongodb v6",
|
||||
version: "6.0.0",
|
||||
expectedCommand: "connPoolStats",
|
||||
},
|
||||
{
|
||||
name: "invalid version",
|
||||
version: "v4",
|
||||
err: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
command, err := poolStatsCommand(test.version)
|
||||
require.Equal(t, test.expectedCommand, command)
|
||||
if test.err {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
1445
plugins/inputs/mongodb/mongostat.go
Normal file
1445
plugins/inputs/mongodb/mongostat.go
Normal file
File diff suppressed because it is too large
Load diff
597
plugins/inputs/mongodb/mongostat_test.go
Normal file
597
plugins/inputs/mongodb/mongostat_test.go
Normal file
|
@ -0,0 +1,597 @@
|
|||
package mongodb
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestLatencyStats(t *testing.T) {
|
||||
sl := newStatLine(
|
||||
mongoStatus{
|
||||
ServerStatus: &serverStatus{
|
||||
Connections: &connectionStats{},
|
||||
Mem: &memStats{
|
||||
Bits: 0,
|
||||
Resident: 0,
|
||||
Virtual: 0,
|
||||
Supported: false,
|
||||
Mapped: 0,
|
||||
MappedWithJournal: 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
mongoStatus{
|
||||
ServerStatus: &serverStatus{
|
||||
Connections: &connectionStats{},
|
||||
Mem: &memStats{
|
||||
Bits: 0,
|
||||
Resident: 0,
|
||||
Virtual: 0,
|
||||
Supported: false,
|
||||
Mapped: 0,
|
||||
MappedWithJournal: 0,
|
||||
},
|
||||
OpLatencies: &opLatenciesStats{
|
||||
Reads: &latencyStats{
|
||||
Ops: 0,
|
||||
Latency: 0,
|
||||
},
|
||||
Writes: &latencyStats{
|
||||
Ops: 0,
|
||||
Latency: 0,
|
||||
},
|
||||
Commands: &latencyStats{
|
||||
Ops: 0,
|
||||
Latency: 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"foo",
|
||||
60,
|
||||
)
|
||||
|
||||
require.Equal(t, int64(0), sl.CommandLatency)
|
||||
require.Equal(t, int64(0), sl.ReadLatency)
|
||||
require.Equal(t, int64(0), sl.WriteLatency)
|
||||
require.Equal(t, int64(0), sl.CommandOpsCnt)
|
||||
require.Equal(t, int64(0), sl.ReadOpsCnt)
|
||||
require.Equal(t, int64(0), sl.WriteOpsCnt)
|
||||
}
|
||||
|
||||
func TestLatencyStatsDiffZero(t *testing.T) {
|
||||
sl := newStatLine(
|
||||
mongoStatus{
|
||||
ServerStatus: &serverStatus{
|
||||
Connections: &connectionStats{},
|
||||
Mem: &memStats{
|
||||
Bits: 0,
|
||||
Resident: 0,
|
||||
Virtual: 0,
|
||||
Supported: false,
|
||||
Mapped: 0,
|
||||
MappedWithJournal: 0,
|
||||
},
|
||||
OpLatencies: &opLatenciesStats{
|
||||
Reads: &latencyStats{
|
||||
Ops: 0,
|
||||
Latency: 0,
|
||||
},
|
||||
Writes: &latencyStats{
|
||||
Ops: 0,
|
||||
Latency: 0,
|
||||
},
|
||||
Commands: &latencyStats{
|
||||
Ops: 0,
|
||||
Latency: 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
mongoStatus{
|
||||
ServerStatus: &serverStatus{
|
||||
Connections: &connectionStats{},
|
||||
Mem: &memStats{
|
||||
Bits: 0,
|
||||
Resident: 0,
|
||||
Virtual: 0,
|
||||
Supported: false,
|
||||
Mapped: 0,
|
||||
MappedWithJournal: 0,
|
||||
},
|
||||
OpLatencies: &opLatenciesStats{
|
||||
Reads: &latencyStats{
|
||||
Ops: 0,
|
||||
Latency: 0,
|
||||
},
|
||||
Writes: &latencyStats{
|
||||
Ops: 0,
|
||||
Latency: 0,
|
||||
},
|
||||
Commands: &latencyStats{
|
||||
Ops: 0,
|
||||
Latency: 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"foo",
|
||||
60,
|
||||
)
|
||||
|
||||
require.Equal(t, int64(0), sl.CommandLatency)
|
||||
require.Equal(t, int64(0), sl.ReadLatency)
|
||||
require.Equal(t, int64(0), sl.WriteLatency)
|
||||
require.Equal(t, int64(0), sl.CommandOpsCnt)
|
||||
require.Equal(t, int64(0), sl.ReadOpsCnt)
|
||||
require.Equal(t, int64(0), sl.WriteOpsCnt)
|
||||
}
|
||||
|
||||
func TestLatencyStatsDiff(t *testing.T) {
|
||||
sl := newStatLine(
|
||||
mongoStatus{
|
||||
ServerStatus: &serverStatus{
|
||||
Connections: &connectionStats{},
|
||||
Mem: &memStats{
|
||||
Bits: 0,
|
||||
Resident: 0,
|
||||
Virtual: 0,
|
||||
Supported: false,
|
||||
Mapped: 0,
|
||||
MappedWithJournal: 0,
|
||||
},
|
||||
OpLatencies: &opLatenciesStats{
|
||||
Reads: &latencyStats{
|
||||
Ops: 4189041956,
|
||||
Latency: 2255922322753,
|
||||
},
|
||||
Writes: &latencyStats{
|
||||
Ops: 1691019457,
|
||||
Latency: 494478256915,
|
||||
},
|
||||
Commands: &latencyStats{
|
||||
Ops: 1019150402,
|
||||
Latency: 59177710371,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
mongoStatus{
|
||||
ServerStatus: &serverStatus{
|
||||
Connections: &connectionStats{},
|
||||
Mem: &memStats{
|
||||
Bits: 0,
|
||||
Resident: 0,
|
||||
Virtual: 0,
|
||||
Supported: false,
|
||||
Mapped: 0,
|
||||
MappedWithJournal: 0,
|
||||
},
|
||||
OpLatencies: &opLatenciesStats{
|
||||
Reads: &latencyStats{
|
||||
Ops: 4189049884,
|
||||
Latency: 2255946760057,
|
||||
},
|
||||
Writes: &latencyStats{
|
||||
Ops: 1691021287,
|
||||
Latency: 494479456987,
|
||||
},
|
||||
Commands: &latencyStats{
|
||||
Ops: 1019152861,
|
||||
Latency: 59177981552,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"foo",
|
||||
60,
|
||||
)
|
||||
|
||||
require.Equal(t, int64(59177981552), sl.CommandLatency)
|
||||
require.Equal(t, int64(2255946760057), sl.ReadLatency)
|
||||
require.Equal(t, int64(494479456987), sl.WriteLatency)
|
||||
require.Equal(t, int64(1019152861), sl.CommandOpsCnt)
|
||||
require.Equal(t, int64(4189049884), sl.ReadOpsCnt)
|
||||
require.Equal(t, int64(1691021287), sl.WriteOpsCnt)
|
||||
}
|
||||
|
||||
func TestLocksStatsNilWhenLocksMissingInOldStat(t *testing.T) {
|
||||
sl := newStatLine(
|
||||
mongoStatus{
|
||||
ServerStatus: &serverStatus{
|
||||
Connections: &connectionStats{},
|
||||
Mem: &memStats{
|
||||
Supported: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
mongoStatus{
|
||||
ServerStatus: &serverStatus{
|
||||
Connections: &connectionStats{},
|
||||
Mem: &memStats{
|
||||
Supported: false,
|
||||
},
|
||||
Locks: map[string]lockStats{
|
||||
"Global": {
|
||||
AcquireCount: &readWriteLockTimes{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"foo",
|
||||
60,
|
||||
)
|
||||
|
||||
require.Nil(t, sl.CollectionLocks)
|
||||
}
|
||||
|
||||
func TestLocksStatsNilWhenGlobalLockStatsMissingInOldStat(t *testing.T) {
|
||||
sl := newStatLine(
|
||||
mongoStatus{
|
||||
ServerStatus: &serverStatus{
|
||||
Connections: &connectionStats{},
|
||||
Mem: &memStats{
|
||||
Supported: false,
|
||||
},
|
||||
Locks: map[string]lockStats{},
|
||||
},
|
||||
},
|
||||
mongoStatus{
|
||||
ServerStatus: &serverStatus{
|
||||
Connections: &connectionStats{},
|
||||
Mem: &memStats{
|
||||
Supported: false,
|
||||
},
|
||||
Locks: map[string]lockStats{
|
||||
"Global": {
|
||||
AcquireCount: &readWriteLockTimes{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"foo",
|
||||
60,
|
||||
)
|
||||
|
||||
require.Nil(t, sl.CollectionLocks)
|
||||
}
|
||||
|
||||
func TestLocksStatsNilWhenGlobalLockStatsEmptyInOldStat(t *testing.T) {
|
||||
sl := newStatLine(
|
||||
mongoStatus{
|
||||
ServerStatus: &serverStatus{
|
||||
Connections: &connectionStats{},
|
||||
Mem: &memStats{
|
||||
Supported: false,
|
||||
},
|
||||
Locks: map[string]lockStats{
|
||||
"Global": {},
|
||||
},
|
||||
},
|
||||
},
|
||||
mongoStatus{
|
||||
ServerStatus: &serverStatus{
|
||||
Connections: &connectionStats{},
|
||||
Mem: &memStats{
|
||||
Supported: false,
|
||||
},
|
||||
Locks: map[string]lockStats{
|
||||
"Global": {
|
||||
AcquireCount: &readWriteLockTimes{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"foo",
|
||||
60,
|
||||
)
|
||||
|
||||
require.Nil(t, sl.CollectionLocks)
|
||||
}
|
||||
|
||||
func TestLocksStatsNilWhenCollectionLockStatsMissingInOldStat(t *testing.T) {
|
||||
sl := newStatLine(
|
||||
mongoStatus{
|
||||
ServerStatus: &serverStatus{
|
||||
Connections: &connectionStats{},
|
||||
Mem: &memStats{
|
||||
Supported: false,
|
||||
},
|
||||
Locks: map[string]lockStats{
|
||||
"Global": {
|
||||
AcquireCount: &readWriteLockTimes{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
mongoStatus{
|
||||
ServerStatus: &serverStatus{
|
||||
Connections: &connectionStats{},
|
||||
Mem: &memStats{
|
||||
Supported: false,
|
||||
},
|
||||
Locks: map[string]lockStats{
|
||||
"Global": {
|
||||
AcquireCount: &readWriteLockTimes{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"foo",
|
||||
60,
|
||||
)
|
||||
|
||||
require.Nil(t, sl.CollectionLocks)
|
||||
}
|
||||
|
||||
func TestLocksStatsNilWhenCollectionLockStatsEmptyInOldStat(t *testing.T) {
|
||||
sl := newStatLine(
|
||||
mongoStatus{
|
||||
ServerStatus: &serverStatus{
|
||||
Connections: &connectionStats{},
|
||||
Mem: &memStats{
|
||||
Supported: false,
|
||||
},
|
||||
Locks: map[string]lockStats{
|
||||
"Global": {
|
||||
AcquireCount: &readWriteLockTimes{},
|
||||
},
|
||||
"Collection": {},
|
||||
},
|
||||
},
|
||||
},
|
||||
mongoStatus{
|
||||
ServerStatus: &serverStatus{
|
||||
Connections: &connectionStats{},
|
||||
Mem: &memStats{
|
||||
Supported: false,
|
||||
},
|
||||
Locks: map[string]lockStats{
|
||||
"Global": {
|
||||
AcquireCount: &readWriteLockTimes{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"foo",
|
||||
60,
|
||||
)
|
||||
|
||||
require.Nil(t, sl.CollectionLocks)
|
||||
}
|
||||
|
||||
func TestLocksStatsNilWhenLocksMissingInNewStat(t *testing.T) {
|
||||
sl := newStatLine(
|
||||
mongoStatus{
|
||||
ServerStatus: &serverStatus{
|
||||
Connections: &connectionStats{},
|
||||
Mem: &memStats{
|
||||
Supported: false,
|
||||
},
|
||||
Locks: map[string]lockStats{
|
||||
"Global": {
|
||||
AcquireCount: &readWriteLockTimes{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
mongoStatus{
|
||||
ServerStatus: &serverStatus{
|
||||
Connections: &connectionStats{},
|
||||
Mem: &memStats{
|
||||
Supported: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
"foo",
|
||||
60,
|
||||
)
|
||||
|
||||
require.Nil(t, sl.CollectionLocks)
|
||||
}
|
||||
|
||||
func TestLocksStatsNilWhenGlobalLockStatsMissingInNewStat(t *testing.T) {
|
||||
sl := newStatLine(
|
||||
mongoStatus{
|
||||
ServerStatus: &serverStatus{
|
||||
Connections: &connectionStats{},
|
||||
Mem: &memStats{
|
||||
Supported: false,
|
||||
},
|
||||
Locks: map[string]lockStats{
|
||||
"Global": {
|
||||
AcquireCount: &readWriteLockTimes{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
mongoStatus{
|
||||
ServerStatus: &serverStatus{
|
||||
Connections: &connectionStats{},
|
||||
Mem: &memStats{
|
||||
Supported: false,
|
||||
},
|
||||
Locks: map[string]lockStats{},
|
||||
},
|
||||
},
|
||||
"foo",
|
||||
60,
|
||||
)
|
||||
|
||||
require.Nil(t, sl.CollectionLocks)
|
||||
}
|
||||
|
||||
func TestLocksStatsNilWhenGlobalLockStatsEmptyInNewStat(t *testing.T) {
|
||||
sl := newStatLine(
|
||||
mongoStatus{
|
||||
ServerStatus: &serverStatus{
|
||||
Connections: &connectionStats{},
|
||||
Mem: &memStats{
|
||||
Supported: false,
|
||||
},
|
||||
Locks: map[string]lockStats{
|
||||
"Global": {
|
||||
AcquireCount: &readWriteLockTimes{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
mongoStatus{
|
||||
ServerStatus: &serverStatus{
|
||||
Connections: &connectionStats{},
|
||||
Mem: &memStats{
|
||||
Supported: false,
|
||||
},
|
||||
Locks: map[string]lockStats{
|
||||
"Global": {},
|
||||
},
|
||||
},
|
||||
},
|
||||
"foo",
|
||||
60,
|
||||
)
|
||||
|
||||
require.Nil(t, sl.CollectionLocks)
|
||||
}
|
||||
|
||||
func TestLocksStatsNilWhenCollectionLockStatsMissingInNewStat(t *testing.T) {
|
||||
sl := newStatLine(
|
||||
mongoStatus{
|
||||
ServerStatus: &serverStatus{
|
||||
Connections: &connectionStats{},
|
||||
Mem: &memStats{
|
||||
Supported: false,
|
||||
},
|
||||
Locks: map[string]lockStats{
|
||||
"Global": {
|
||||
AcquireCount: &readWriteLockTimes{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
mongoStatus{
|
||||
ServerStatus: &serverStatus{
|
||||
Connections: &connectionStats{},
|
||||
Mem: &memStats{
|
||||
Supported: false,
|
||||
},
|
||||
Locks: map[string]lockStats{
|
||||
"Global": {
|
||||
AcquireCount: &readWriteLockTimes{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"foo",
|
||||
60,
|
||||
)
|
||||
|
||||
require.Nil(t, sl.CollectionLocks)
|
||||
}
|
||||
|
||||
func TestLocksStatsNilWhenCollectionLockStatsEmptyInNewStat(t *testing.T) {
|
||||
sl := newStatLine(
|
||||
mongoStatus{
|
||||
ServerStatus: &serverStatus{
|
||||
Connections: &connectionStats{},
|
||||
Mem: &memStats{
|
||||
Supported: false,
|
||||
},
|
||||
Locks: map[string]lockStats{
|
||||
"Global": {
|
||||
AcquireCount: &readWriteLockTimes{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
mongoStatus{
|
||||
ServerStatus: &serverStatus{
|
||||
Connections: &connectionStats{},
|
||||
Mem: &memStats{
|
||||
Supported: false,
|
||||
},
|
||||
Locks: map[string]lockStats{
|
||||
"Global": {
|
||||
AcquireCount: &readWriteLockTimes{},
|
||||
},
|
||||
"Collection": {},
|
||||
},
|
||||
},
|
||||
},
|
||||
"foo",
|
||||
60,
|
||||
)
|
||||
|
||||
require.Nil(t, sl.CollectionLocks)
|
||||
}
|
||||
|
||||
func TestLocksStatsPopulated(t *testing.T) {
|
||||
sl := newStatLine(
|
||||
mongoStatus{
|
||||
ServerStatus: &serverStatus{
|
||||
Connections: &connectionStats{},
|
||||
Mem: &memStats{
|
||||
Supported: false,
|
||||
},
|
||||
Locks: map[string]lockStats{
|
||||
"Global": {
|
||||
AcquireCount: &readWriteLockTimes{},
|
||||
},
|
||||
"Collection": {
|
||||
AcquireWaitCount: &readWriteLockTimes{
|
||||
Read: 1,
|
||||
Write: 2,
|
||||
},
|
||||
AcquireCount: &readWriteLockTimes{
|
||||
Read: 5,
|
||||
Write: 10,
|
||||
},
|
||||
TimeAcquiringMicros: readWriteLockTimes{
|
||||
Read: 100,
|
||||
Write: 200,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
mongoStatus{
|
||||
ServerStatus: &serverStatus{
|
||||
Connections: &connectionStats{},
|
||||
Mem: &memStats{
|
||||
Supported: false,
|
||||
},
|
||||
Locks: map[string]lockStats{
|
||||
"Global": {
|
||||
AcquireCount: &readWriteLockTimes{},
|
||||
},
|
||||
"Collection": {
|
||||
AcquireWaitCount: &readWriteLockTimes{
|
||||
Read: 2,
|
||||
Write: 4,
|
||||
},
|
||||
AcquireCount: &readWriteLockTimes{
|
||||
Read: 10,
|
||||
Write: 30,
|
||||
},
|
||||
TimeAcquiringMicros: readWriteLockTimes{
|
||||
Read: 250,
|
||||
Write: 310,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"foo",
|
||||
60,
|
||||
)
|
||||
|
||||
expected := &collectionLockStatus{
|
||||
ReadAcquireWaitsPercentage: 20,
|
||||
WriteAcquireWaitsPercentage: 10,
|
||||
ReadAcquireTimeMicros: 150,
|
||||
WriteAcquireTimeMicros: 55,
|
||||
}
|
||||
|
||||
require.Equal(t, expected, sl.CollectionLocks)
|
||||
}
|
44
plugins/inputs/mongodb/sample.conf
Normal file
44
plugins/inputs/mongodb/sample.conf
Normal file
|
@ -0,0 +1,44 @@
|
|||
# Read metrics from one or many MongoDB servers
|
||||
[[inputs.mongodb]]
|
||||
## An array of URLs of the form:
|
||||
## "mongodb://" [user ":" pass "@"] host [ ":" port]
|
||||
## For example:
|
||||
## mongodb://user:auth_key@10.10.3.30:27017,
|
||||
## mongodb://10.10.3.33:18832,
|
||||
##
|
||||
## If connecting to a cluster, users must include the "?connect=direct" in
|
||||
## the URL to ensure that the connection goes directly to the specified node
|
||||
## and not have all connections passed to the master node.
|
||||
servers = ["mongodb://127.0.0.1:27017/?connect=direct"]
|
||||
|
||||
## When true, collect cluster status.
|
||||
## Note that the query that counts jumbo chunks triggers a COLLSCAN, which
|
||||
## may have an impact on performance.
|
||||
# gather_cluster_status = true
|
||||
|
||||
## When true, collect per database stats
|
||||
# gather_perdb_stats = false
|
||||
|
||||
## When true, collect per collection stats
|
||||
# gather_col_stats = false
|
||||
|
||||
## When true, collect usage statistics for each collection
|
||||
## (insert, update, queries, remove, getmore, commands etc...).
|
||||
# gather_top_stat = false
|
||||
|
||||
## List of databases for which collection stats are collected
## If empty, collection stats are gathered for all databases
|
||||
# col_stats_dbs = ["local"]
|
||||
|
||||
## Optional TLS Config
|
||||
# tls_ca = "/etc/telegraf/ca.pem"
|
||||
# tls_cert = "/etc/telegraf/cert.pem"
|
||||
# tls_key = "/etc/telegraf/key.pem"
|
||||
## Use TLS but skip chain & host verification
|
||||
# insecure_skip_verify = false
|
||||
|
||||
## Specifies plugin behavior regarding disconnected servers
|
||||
## Available choices:
## - error: telegraf will return an error on startup if one of the servers is unreachable
|
||||
## - skip: telegraf will skip unreachable servers on both startup and gather
|
||||
# disconnected_servers_behavior = "error"
|
Loading…
Add table
Add a link
Reference in a new issue