Adding upstream version 1.34.4.
Signed-off-by: Daniel Baumann <daniel@debian.org>
This commit is contained in:
parent
e393c3af3f
commit
4978089aab
4963 changed files with 677545 additions and 0 deletions
654
plugins/inputs/varnish/README.md
Normal file
654
plugins/inputs/varnish/README.md
Normal file
|
@ -0,0 +1,654 @@
|
|||
# Varnish Input Plugin
|
||||
|
||||
This plugin gathers stats from [Varnish HTTP Cache](https://varnish-cache.org/)
|
||||
|
||||
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
|
||||
|
||||
In addition to the plugin-specific configuration settings, plugins support
|
||||
additional global and plugin configuration settings. These settings are used to
|
||||
modify metrics, tags, and field or create aliases and configure ordering, etc.
|
||||
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
|
||||
|
||||
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
|
||||
|
||||
## Configuration
|
||||
|
||||
```toml @sample.conf
|
||||
# A plugin to collect stats from Varnish HTTP Cache
|
||||
# This plugin ONLY supports non-Windows
|
||||
[[inputs.varnish]]
|
||||
## If running as a restricted user you can prepend sudo for additional access:
|
||||
#use_sudo = false
|
||||
|
||||
## The default location of the varnishstat binary can be overridden with:
|
||||
binary = "/usr/bin/varnishstat"
|
||||
|
||||
## Additional custom arguments for the varnishstat command
|
||||
# binary_args = ["-f", "MAIN.*"]
|
||||
|
||||
## The default location of the varnishadm binary can be overridden with:
|
||||
adm_binary = "/usr/bin/varnishadm"
|
||||
|
||||
## Custom arguments for the varnishadm command
|
||||
# adm_binary_args = [""]
|
||||
|
||||
## Metric version defaults to metric_version=1, use metric_version=2 for removal of nonactive vcls
|
||||
## Varnish 6.0.2 and newer is required for metric_version=2.
|
||||
metric_version = 1
|
||||
|
||||
## Additional regexps to override builtin conversion of varnish metrics into telegraf metrics.
|
||||
## Regexp group "_vcl" is used for extracting the VCL name. Metrics that contain nonactive VCL's are skipped.
|
||||
## Regexp group "_field" overrides the field name. Other named regexp groups are used as tags.
|
||||
# regexps = ['^XCNT\.(?P<_vcl>[\w\-]*)(\.)*(?P<group>[\w\-.+]*)\.(?P<_field>[\w\-.+]*)\.val']
|
||||
|
||||
## By default, telegraf gathers stats for 3 metric points.
|
||||
## Setting stats will override the defaults shown below.
|
||||
## Glob matching can be used, i.e., stats = ["MAIN.*"]
|
||||
## stats may also be set to ["*"], which will collect all stats
|
||||
stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"]
|
||||
|
||||
## Optional name for the varnish instance (or working directory) to query
|
||||
## Usually append after -n in varnish cli
|
||||
# instance_name = instanceName
|
||||
|
||||
## Timeout for varnishstat command
|
||||
# timeout = "1s"
|
||||
```
|
||||
|
||||
## Metrics
|
||||
|
||||
### metric_version=1
|
||||
|
||||
This is the full list of stats provided by varnish. Stats will be grouped by
|
||||
their capitalized prefix (eg MAIN, MEMPOOL, etc). In the output, the prefix will
|
||||
be used as a tag, and removed from field names.
|
||||
|
||||
- varnish
|
||||
- MAIN.uptime (uint64, count, Child process uptime)
|
||||
- MAIN.sess_conn (uint64, count, Sessions accepted)
|
||||
- MAIN.sess_drop (uint64, count, Sessions dropped)
|
||||
- MAIN.sess_fail (uint64, count, Session accept failures)
|
||||
- MAIN.sess_pipe_overflow (uint64, count, Session pipe overflow)
|
||||
- MAIN.client_req_400 (uint64, count, Client requests received,)
|
||||
- MAIN.client_req_411 (uint64, count, Client requests received,)
|
||||
- MAIN.client_req_413 (uint64, count, Client requests received,)
|
||||
- MAIN.client_req_417 (uint64, count, Client requests received,)
|
||||
- MAIN.client_req (uint64, count, Good client requests)
|
||||
- MAIN.cache_hit (uint64, count, Cache hits)
|
||||
- MAIN.cache_hitpass (uint64, count, Cache hits for)
|
||||
- MAIN.cache_miss (uint64, count, Cache misses)
|
||||
- MAIN.backend_conn (uint64, count, Backend conn. success)
|
||||
- MAIN.backend_unhealthy (uint64, count, Backend conn. not)
|
||||
- MAIN.backend_busy (uint64, count, Backend conn. too)
|
||||
- MAIN.backend_fail (uint64, count, Backend conn. failures)
|
||||
- MAIN.backend_reuse (uint64, count, Backend conn. reuses)
|
||||
- MAIN.backend_toolate (uint64, count, Backend conn. was)
|
||||
- MAIN.backend_recycle (uint64, count, Backend conn. recycles)
|
||||
- MAIN.backend_retry (uint64, count, Backend conn. retry)
|
||||
- MAIN.fetch_head (uint64, count, Fetch no body)
|
||||
- MAIN.fetch_length (uint64, count, Fetch with Length)
|
||||
- MAIN.fetch_chunked (uint64, count, Fetch chunked)
|
||||
- MAIN.fetch_eof (uint64, count, Fetch EOF)
|
||||
- MAIN.fetch_bad (uint64, count, Fetch bad T- E)
|
||||
- MAIN.fetch_close (uint64, count, Fetch wanted close)
|
||||
- MAIN.fetch_oldhttp (uint64, count, Fetch pre HTTP/1.1)
|
||||
- MAIN.fetch_zero (uint64, count, Fetch zero len)
|
||||
- MAIN.fetch_1xx (uint64, count, Fetch no body)
|
||||
- MAIN.fetch_204 (uint64, count, Fetch no body)
|
||||
- MAIN.fetch_304 (uint64, count, Fetch no body)
|
||||
- MAIN.fetch_failed (uint64, count, Fetch failed (all)
|
||||
- MAIN.fetch_no_thread (uint64, count, Fetch failed (no)
|
||||
- MAIN.pools (uint64, count, Number of thread)
|
||||
- MAIN.threads (uint64, count, Total number of)
|
||||
- MAIN.threads_limited (uint64, count, Threads hit max)
|
||||
- MAIN.threads_created (uint64, count, Threads created)
|
||||
- MAIN.threads_destroyed (uint64, count, Threads destroyed)
|
||||
- MAIN.threads_failed (uint64, count, Thread creation failed)
|
||||
- MAIN.thread_queue_len (uint64, count, Length of session)
|
||||
- MAIN.busy_sleep (uint64, count, Number of requests)
|
||||
- MAIN.busy_wakeup (uint64, count, Number of requests)
|
||||
- MAIN.sess_queued (uint64, count, Sessions queued for)
|
||||
- MAIN.sess_dropped (uint64, count, Sessions dropped for)
|
||||
- MAIN.n_object (uint64, count, object structs made)
|
||||
- MAIN.n_vampireobject (uint64, count, unresurrected objects)
|
||||
- MAIN.n_objectcore (uint64, count, objectcore structs made)
|
||||
- MAIN.n_objecthead (uint64, count, objecthead structs made)
|
||||
- MAIN.n_waitinglist (uint64, count, waitinglist structs made)
|
||||
- MAIN.n_backend (uint64, count, Number of backends)
|
||||
- MAIN.n_expired (uint64, count, Number of expired)
|
||||
- MAIN.n_lru_nuked (uint64, count, Number of LRU)
|
||||
- MAIN.n_lru_moved (uint64, count, Number of LRU)
|
||||
- MAIN.losthdr (uint64, count, HTTP header overflows)
|
||||
- MAIN.s_sess (uint64, count, Total sessions seen)
|
||||
- MAIN.s_req (uint64, count, Total requests seen)
|
||||
- MAIN.s_pipe (uint64, count, Total pipe sessions)
|
||||
- MAIN.s_pass (uint64, count, Total pass- ed requests)
|
||||
- MAIN.s_fetch (uint64, count, Total backend fetches)
|
||||
- MAIN.s_synth (uint64, count, Total synthetic responses)
|
||||
- MAIN.s_req_hdrbytes (uint64, count, Request header bytes)
|
||||
- MAIN.s_req_bodybytes (uint64, count, Request body bytes)
|
||||
- MAIN.s_resp_hdrbytes (uint64, count, Response header bytes)
|
||||
- MAIN.s_resp_bodybytes (uint64, count, Response body bytes)
|
||||
- MAIN.s_pipe_hdrbytes (uint64, count, Pipe request header)
|
||||
- MAIN.s_pipe_in (uint64, count, Piped bytes from)
|
||||
- MAIN.s_pipe_out (uint64, count, Piped bytes to)
|
||||
- MAIN.sess_closed (uint64, count, Session Closed)
|
||||
- MAIN.sess_pipeline (uint64, count, Session Pipeline)
|
||||
- MAIN.sess_readahead (uint64, count, Session Read Ahead)
|
||||
- MAIN.sess_herd (uint64, count, Session herd)
|
||||
- MAIN.shm_records (uint64, count, SHM records)
|
||||
- MAIN.shm_writes (uint64, count, SHM writes)
|
||||
- MAIN.shm_flushes (uint64, count, SHM flushes due)
|
||||
- MAIN.shm_cont (uint64, count, SHM MTX contention)
|
||||
- MAIN.shm_cycles (uint64, count, SHM cycles through)
|
||||
- MAIN.sms_nreq (uint64, count, SMS allocator requests)
|
||||
- MAIN.sms_nobj (uint64, count, SMS outstanding allocations)
|
||||
- MAIN.sms_nbytes (uint64, count, SMS outstanding bytes)
|
||||
- MAIN.sms_balloc (uint64, count, SMS bytes allocated)
|
||||
- MAIN.sms_bfree (uint64, count, SMS bytes freed)
|
||||
- MAIN.backend_req (uint64, count, Backend requests made)
|
||||
- MAIN.n_vcl (uint64, count, Number of loaded)
|
||||
- MAIN.n_vcl_avail (uint64, count, Number of VCLs)
|
||||
- MAIN.n_vcl_discard (uint64, count, Number of discarded)
|
||||
- MAIN.bans (uint64, count, Count of bans)
|
||||
- MAIN.bans_completed (uint64, count, Number of bans)
|
||||
- MAIN.bans_obj (uint64, count, Number of bans)
|
||||
- MAIN.bans_req (uint64, count, Number of bans)
|
||||
- MAIN.bans_added (uint64, count, Bans added)
|
||||
- MAIN.bans_deleted (uint64, count, Bans deleted)
|
||||
- MAIN.bans_tested (uint64, count, Bans tested against)
|
||||
- MAIN.bans_obj_killed (uint64, count, Objects killed by)
|
||||
- MAIN.bans_lurker_tested (uint64, count, Bans tested against)
|
||||
- MAIN.bans_tests_tested (uint64, count, Ban tests tested)
|
||||
- MAIN.bans_lurker_tests_tested (uint64, count, Ban tests tested)
|
||||
- MAIN.bans_lurker_obj_killed (uint64, count, Objects killed by)
|
||||
- MAIN.bans_dups (uint64, count, Bans superseded by)
|
||||
- MAIN.bans_lurker_contention (uint64, count, Lurker gave way)
|
||||
- MAIN.bans_persisted_bytes (uint64, count, Bytes used by)
|
||||
- MAIN.bans_persisted_fragmentation (uint64, count, Extra bytes in)
|
||||
- MAIN.n_purges (uint64, count, Number of purge)
|
||||
- MAIN.n_obj_purged (uint64, count, Number of purged)
|
||||
- MAIN.exp_mailed (uint64, count, Number of objects)
|
||||
- MAIN.exp_received (uint64, count, Number of objects)
|
||||
- MAIN.hcb_nolock (uint64, count, HCB Lookups without)
|
||||
- MAIN.hcb_lock (uint64, count, HCB Lookups with)
|
||||
- MAIN.hcb_insert (uint64, count, HCB Inserts)
|
||||
- MAIN.esi_errors (uint64, count, ESI parse errors)
|
||||
- MAIN.esi_warnings (uint64, count, ESI parse warnings)
|
||||
- MAIN.vmods (uint64, count, Loaded VMODs)
|
||||
- MAIN.n_gzip (uint64, count, Gzip operations)
|
||||
- MAIN.n_gunzip (uint64, count, Gunzip operations)
|
||||
- MAIN.vsm_free (uint64, count, Free VSM space)
|
||||
- MAIN.vsm_used (uint64, count, Used VSM space)
|
||||
- MAIN.vsm_cooling (uint64, count, Cooling VSM space)
|
||||
- MAIN.vsm_overflow (uint64, count, Overflow VSM space)
|
||||
- MAIN.vsm_overflowed (uint64, count, Overflowed VSM space)
|
||||
- MGT.uptime (uint64, count, Management process uptime)
|
||||
- MGT.child_start (uint64, count, Child process started)
|
||||
- MGT.child_exit (uint64, count, Child process normal)
|
||||
- MGT.child_stop (uint64, count, Child process unexpected)
|
||||
- MGT.child_died (uint64, count, Child process died)
|
||||
- MGT.child_dump (uint64, count, Child process core)
|
||||
- MGT.child_panic (uint64, count, Child process panic)
|
||||
- MEMPOOL.vbc.live (uint64, count, In use)
|
||||
- MEMPOOL.vbc.pool (uint64, count, In Pool)
|
||||
- MEMPOOL.vbc.sz_wanted (uint64, count, Size requested)
|
||||
- MEMPOOL.vbc.sz_needed (uint64, count, Size allocated)
|
||||
- MEMPOOL.vbc.allocs (uint64, count, Allocations )
|
||||
- MEMPOOL.vbc.frees (uint64, count, Frees )
|
||||
- MEMPOOL.vbc.recycle (uint64, count, Recycled from pool)
|
||||
- MEMPOOL.vbc.timeout (uint64, count, Timed out from)
|
||||
- MEMPOOL.vbc.toosmall (uint64, count, Too small to)
|
||||
- MEMPOOL.vbc.surplus (uint64, count, Too many for)
|
||||
- MEMPOOL.vbc.randry (uint64, count, Pool ran dry)
|
||||
- MEMPOOL.busyobj.live (uint64, count, In use)
|
||||
- MEMPOOL.busyobj.pool (uint64, count, In Pool)
|
||||
- MEMPOOL.busyobj.sz_wanted (uint64, count, Size requested)
|
||||
- MEMPOOL.busyobj.sz_needed (uint64, count, Size allocated)
|
||||
- MEMPOOL.busyobj.allocs (uint64, count, Allocations )
|
||||
- MEMPOOL.busyobj.frees (uint64, count, Frees )
|
||||
- MEMPOOL.busyobj.recycle (uint64, count, Recycled from pool)
|
||||
- MEMPOOL.busyobj.timeout (uint64, count, Timed out from)
|
||||
- MEMPOOL.busyobj.toosmall (uint64, count, Too small to)
|
||||
- MEMPOOL.busyobj.surplus (uint64, count, Too many for)
|
||||
- MEMPOOL.busyobj.randry (uint64, count, Pool ran dry)
|
||||
- MEMPOOL.req0.live (uint64, count, In use)
|
||||
- MEMPOOL.req0.pool (uint64, count, In Pool)
|
||||
- MEMPOOL.req0.sz_wanted (uint64, count, Size requested)
|
||||
- MEMPOOL.req0.sz_needed (uint64, count, Size allocated)
|
||||
- MEMPOOL.req0.allocs (uint64, count, Allocations )
|
||||
- MEMPOOL.req0.frees (uint64, count, Frees )
|
||||
- MEMPOOL.req0.recycle (uint64, count, Recycled from pool)
|
||||
- MEMPOOL.req0.timeout (uint64, count, Timed out from)
|
||||
- MEMPOOL.req0.toosmall (uint64, count, Too small to)
|
||||
- MEMPOOL.req0.surplus (uint64, count, Too many for)
|
||||
- MEMPOOL.req0.randry (uint64, count, Pool ran dry)
|
||||
- MEMPOOL.sess0.live (uint64, count, In use)
|
||||
- MEMPOOL.sess0.pool (uint64, count, In Pool)
|
||||
- MEMPOOL.sess0.sz_wanted (uint64, count, Size requested)
|
||||
- MEMPOOL.sess0.sz_needed (uint64, count, Size allocated)
|
||||
- MEMPOOL.sess0.allocs (uint64, count, Allocations )
|
||||
- MEMPOOL.sess0.frees (uint64, count, Frees )
|
||||
- MEMPOOL.sess0.recycle (uint64, count, Recycled from pool)
|
||||
- MEMPOOL.sess0.timeout (uint64, count, Timed out from)
|
||||
- MEMPOOL.sess0.toosmall (uint64, count, Too small to)
|
||||
- MEMPOOL.sess0.surplus (uint64, count, Too many for)
|
||||
- MEMPOOL.sess0.randry (uint64, count, Pool ran dry)
|
||||
- MEMPOOL.req1.live (uint64, count, In use)
|
||||
- MEMPOOL.req1.pool (uint64, count, In Pool)
|
||||
- MEMPOOL.req1.sz_wanted (uint64, count, Size requested)
|
||||
- MEMPOOL.req1.sz_needed (uint64, count, Size allocated)
|
||||
- MEMPOOL.req1.allocs (uint64, count, Allocations )
|
||||
- MEMPOOL.req1.frees (uint64, count, Frees )
|
||||
- MEMPOOL.req1.recycle (uint64, count, Recycled from pool)
|
||||
- MEMPOOL.req1.timeout (uint64, count, Timed out from)
|
||||
- MEMPOOL.req1.toosmall (uint64, count, Too small to)
|
||||
- MEMPOOL.req1.surplus (uint64, count, Too many for)
|
||||
- MEMPOOL.req1.randry (uint64, count, Pool ran dry)
|
||||
- MEMPOOL.sess1.live (uint64, count, In use)
|
||||
- MEMPOOL.sess1.pool (uint64, count, In Pool)
|
||||
- MEMPOOL.sess1.sz_wanted (uint64, count, Size requested)
|
||||
- MEMPOOL.sess1.sz_needed (uint64, count, Size allocated)
|
||||
- MEMPOOL.sess1.allocs (uint64, count, Allocations )
|
||||
- MEMPOOL.sess1.frees (uint64, count, Frees )
|
||||
- MEMPOOL.sess1.recycle (uint64, count, Recycled from pool)
|
||||
- MEMPOOL.sess1.timeout (uint64, count, Timed out from)
|
||||
- MEMPOOL.sess1.toosmall (uint64, count, Too small to)
|
||||
- MEMPOOL.sess1.surplus (uint64, count, Too many for)
|
||||
- MEMPOOL.sess1.randry (uint64, count, Pool ran dry)
|
||||
- SMA.s0.c_req (uint64, count, Allocator requests)
|
||||
- SMA.s0.c_fail (uint64, count, Allocator failures)
|
||||
- SMA.s0.c_bytes (uint64, count, Bytes allocated)
|
||||
- SMA.s0.c_freed (uint64, count, Bytes freed)
|
||||
- SMA.s0.g_alloc (uint64, count, Allocations outstanding)
|
||||
- SMA.s0.g_bytes (uint64, count, Bytes outstanding)
|
||||
- SMA.s0.g_space (uint64, count, Bytes available)
|
||||
- SMA.Transient.c_req (uint64, count, Allocator requests)
|
||||
- SMA.Transient.c_fail (uint64, count, Allocator failures)
|
||||
- SMA.Transient.c_bytes (uint64, count, Bytes allocated)
|
||||
- SMA.Transient.c_freed (uint64, count, Bytes freed)
|
||||
- SMA.Transient.g_alloc (uint64, count, Allocations outstanding)
|
||||
- SMA.Transient.g_bytes (uint64, count, Bytes outstanding)
|
||||
- SMA.Transient.g_space (uint64, count, Bytes available)
|
||||
- VBE.default(127.0.0.1,,8080).vcls (uint64, count, VCL references)
|
||||
- VBE.default(127.0.0.1,,8080).happy (uint64, count, Happy health probes)
|
||||
- VBE.default(127.0.0.1,,8080).bereq_hdrbytes (uint64, count, Req. header bytes)
|
||||
- VBE.default(127.0.0.1,,8080).bereq_bodybytes (uint64, count, Request body bytes)
|
||||
- VBE.default(127.0.0.1,,8080).beresp_hdrbytes (uint64, count, Resp. header bytes)
|
||||
- VBE.default(127.0.0.1,,8080).beresp_bodybytes (uint64, count, Response body bytes)
|
||||
- VBE.default(127.0.0.1,,8080).pipe_hdrbytes (uint64, count, Pipe request header)
|
||||
- VBE.default(127.0.0.1,,8080).pipe_out (uint64, count, Piped bytes to)
|
||||
- VBE.default(127.0.0.1,,8080).pipe_in (uint64, count, Piped bytes from)
|
||||
- LCK.sms.creat (uint64, count, Created locks)
|
||||
- LCK.sms.destroy (uint64, count, Destroyed locks)
|
||||
- LCK.sms.locks (uint64, count, Lock Operations)
|
||||
- LCK.smp.creat (uint64, count, Created locks)
|
||||
- LCK.smp.destroy (uint64, count, Destroyed locks)
|
||||
- LCK.smp.locks (uint64, count, Lock Operations)
|
||||
- LCK.sma.creat (uint64, count, Created locks)
|
||||
- LCK.sma.destroy (uint64, count, Destroyed locks)
|
||||
- LCK.sma.locks (uint64, count, Lock Operations)
|
||||
- LCK.smf.creat (uint64, count, Created locks)
|
||||
- LCK.smf.destroy (uint64, count, Destroyed locks)
|
||||
- LCK.smf.locks (uint64, count, Lock Operations)
|
||||
- LCK.hsl.creat (uint64, count, Created locks)
|
||||
- LCK.hsl.destroy (uint64, count, Destroyed locks)
|
||||
- LCK.hsl.locks (uint64, count, Lock Operations)
|
||||
- LCK.hcb.creat (uint64, count, Created locks)
|
||||
- LCK.hcb.destroy (uint64, count, Destroyed locks)
|
||||
- LCK.hcb.locks (uint64, count, Lock Operations)
|
||||
- LCK.hcl.creat (uint64, count, Created locks)
|
||||
- LCK.hcl.destroy (uint64, count, Destroyed locks)
|
||||
- LCK.hcl.locks (uint64, count, Lock Operations)
|
||||
- LCK.vcl.creat (uint64, count, Created locks)
|
||||
- LCK.vcl.destroy (uint64, count, Destroyed locks)
|
||||
- LCK.vcl.locks (uint64, count, Lock Operations)
|
||||
- LCK.sessmem.creat (uint64, count, Created locks)
|
||||
- LCK.sessmem.destroy (uint64, count, Destroyed locks)
|
||||
- LCK.sessmem.locks (uint64, count, Lock Operations)
|
||||
- LCK.sess.creat (uint64, count, Created locks)
|
||||
- LCK.sess.destroy (uint64, count, Destroyed locks)
|
||||
- LCK.sess.locks (uint64, count, Lock Operations)
|
||||
- LCK.wstat.creat (uint64, count, Created locks)
|
||||
- LCK.wstat.destroy (uint64, count, Destroyed locks)
|
||||
- LCK.wstat.locks (uint64, count, Lock Operations)
|
||||
- LCK.herder.creat (uint64, count, Created locks)
|
||||
- LCK.herder.destroy (uint64, count, Destroyed locks)
|
||||
- LCK.herder.locks (uint64, count, Lock Operations)
|
||||
- LCK.wq.creat (uint64, count, Created locks)
|
||||
- LCK.wq.destroy (uint64, count, Destroyed locks)
|
||||
- LCK.wq.locks (uint64, count, Lock Operations)
|
||||
- LCK.objhdr.creat (uint64, count, Created locks)
|
||||
- LCK.objhdr.destroy (uint64, count, Destroyed locks)
|
||||
- LCK.objhdr.locks (uint64, count, Lock Operations)
|
||||
- LCK.exp.creat (uint64, count, Created locks)
|
||||
- LCK.exp.destroy (uint64, count, Destroyed locks)
|
||||
- LCK.exp.locks (uint64, count, Lock Operations)
|
||||
- LCK.lru.creat (uint64, count, Created locks)
|
||||
- LCK.lru.destroy (uint64, count, Destroyed locks)
|
||||
- LCK.lru.locks (uint64, count, Lock Operations)
|
||||
- LCK.cli.creat (uint64, count, Created locks)
|
||||
- LCK.cli.destroy (uint64, count, Destroyed locks)
|
||||
- LCK.cli.locks (uint64, count, Lock Operations)
|
||||
- LCK.ban.creat (uint64, count, Created locks)
|
||||
- LCK.ban.destroy (uint64, count, Destroyed locks)
|
||||
- LCK.ban.locks (uint64, count, Lock Operations)
|
||||
- LCK.vbp.creat (uint64, count, Created locks)
|
||||
- LCK.vbp.destroy (uint64, count, Destroyed locks)
|
||||
- LCK.vbp.locks (uint64, count, Lock Operations)
|
||||
- LCK.backend.creat (uint64, count, Created locks)
|
||||
- LCK.backend.destroy (uint64, count, Destroyed locks)
|
||||
- LCK.backend.locks (uint64, count, Lock Operations)
|
||||
- LCK.vcapace.creat (uint64, count, Created locks)
|
||||
- LCK.vcapace.destroy (uint64, count, Destroyed locks)
|
||||
- LCK.vcapace.locks (uint64, count, Lock Operations)
|
||||
- LCK.nbusyobj.creat (uint64, count, Created locks)
|
||||
- LCK.nbusyobj.destroy (uint64, count, Destroyed locks)
|
||||
- LCK.nbusyobj.locks (uint64, count, Lock Operations)
|
||||
- LCK.busyobj.creat (uint64, count, Created locks)
|
||||
- LCK.busyobj.destroy (uint64, count, Destroyed locks)
|
||||
- LCK.busyobj.locks (uint64, count, Lock Operations)
|
||||
- LCK.mempool.creat (uint64, count, Created locks)
|
||||
- LCK.mempool.destroy (uint64, count, Destroyed locks)
|
||||
- LCK.mempool.locks (uint64, count, Lock Operations)
|
||||
- LCK.vxid.creat (uint64, count, Created locks)
|
||||
- LCK.vxid.destroy (uint64, count, Destroyed locks)
|
||||
- LCK.vxid.locks (uint64, count, Lock Operations)
|
||||
- LCK.pipestat.creat (uint64, count, Created locks)
|
||||
- LCK.pipestat.destroy (uint64, count, Destroyed locks)
|
||||
- LCK.pipestat.locks (uint64, count, Lock Operations)
|
||||
|
||||
### Tags
|
||||
|
||||
As indicated above, the prefix of a varnish stat will be used as its 'section'
|
||||
tag. So the section tag may have one of the following values:
|
||||
|
||||
- section:
|
||||
- MAIN
|
||||
- MGT
|
||||
- MEMPOOL
|
||||
- SMA
|
||||
- VBE
|
||||
- LCK
|
||||
|
||||
### metric_version=2
|
||||
|
||||
When `metric_version=2` is enabled, the plugin runs `varnishstat -j` command and
|
||||
parses the JSON output into metrics.
|
||||
|
||||
Plugin uses `varnishadm vcl.list -j` commandline to find the active VCL. Metrics
|
||||
that are related to the nonactive VCL are excluded from monitoring.
|
||||
|
||||
## Requirements
|
||||
|
||||
- Varnish 6.0.2+ is required (older versions do not support JSON output from
|
||||
CLI tools)
|
||||
|
||||
## Examples
|
||||
|
||||
Varnish counter:
|
||||
|
||||
```json
|
||||
{
|
||||
"MAIN.cache_hit": {
|
||||
"description": "Cache hits",
|
||||
"flag": "c",
|
||||
"format": "i",
|
||||
"value": 51
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Influx metric:
|
||||
`varnish,section=MAIN cache_hit=51i 1462765437090957980`
|
||||
|
||||
## Advanced customizations using regexps
|
||||
|
||||
Finding the VCL in a varnish measurement and parsing into tags can be adjusted
|
||||
by using Go regular expressions.
|
||||
|
||||
Regexps use a special named group `(?P<_vcl>[\w\-]*)(\.)` to extract VCL
|
||||
name. `(?P<_field>[\w\-.+]*)\.val` regexp group extracts the field name. All
|
||||
other named regexp groups like `(?P<my_tag>[\w\-.+]*)` are tags.
|
||||
|
||||
_Tip: It is useful to verify regexps using online tools like
|
||||
<https://regoio.herokuapp.com/>._
|
||||
|
||||
By default, the plugin has a builtin list of regexps for following VMODs:
|
||||
|
||||
### Dynamic Backends (goto)
|
||||
|
||||
```regex
|
||||
^VBE\.(?P<_vcl>[\w\-]*)\.goto\.[[:alnum:]]+\.\((?P<backend>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\)\.\((?P<server>.*)\)\.\(ttl:\d*\.\d*.*\)
|
||||
```
|
||||
|
||||
with data
|
||||
|
||||
```text
|
||||
VBE.VCL12323.goto.000007c8.(123.123.123.123).(http://aaa.xxcc:80).(ttl:3600.000000).cache_hit
|
||||
```
|
||||
|
||||
results in
|
||||
|
||||
```text
|
||||
varnish,section=VBE,backend="123.123.123.123",server="http://aaa.xxcc:80" cache_hit=51i 1462765437090957980
|
||||
```
|
||||
|
||||
### Key value storage (kvstore)
|
||||
|
||||
```regex
|
||||
^KVSTORE\.(?P<id>[\w\-]*)\.(?P<_vcl>[\w\-]*)\.([\w\-]*)
|
||||
```
|
||||
|
||||
with data
|
||||
|
||||
```text
|
||||
KVSTORE.object_name.vcl_name.key
|
||||
```
|
||||
|
||||
-> `varnish,section=KVSTORE,id=object_name key=5i`
|
||||
|
||||
### XCNT (libvmod-xcounter)
|
||||
|
||||
```regex
|
||||
^XCNT\.(?P<_vcl>[\w\-]*)(\.)*(?P<group>[\w\-.+]*)\.(?P<_field>[\w\-.+]*)\.val
|
||||
```
|
||||
|
||||
with data
|
||||
|
||||
```text
|
||||
XCNT.abc1234.XXX+_YYYY.cr.pass.val
|
||||
```
|
||||
|
||||
results in
|
||||
|
||||
```text
|
||||
varnish,section=XCNT,group="XXX+_YYYY.cr" pass=5i
|
||||
```
|
||||
|
||||
### Standard VBE metrics
|
||||
|
||||
```regex
|
||||
^VBE\.(?P<_vcl>[\w\-]*)\.(?P<backend>[\w\-]*)\.([\w\-]*)
|
||||
```
|
||||
|
||||
with data
|
||||
|
||||
```text
|
||||
VBE.reload_20210622_153544_23757.default.unhealthy
|
||||
```
|
||||
|
||||
results in
|
||||
|
||||
```text
|
||||
varnish,section=VBE,backend="default" unhealthy=51i 1462765437090957980
|
||||
```
|
||||
|
||||
### Default generic metric
|
||||
|
||||
```regex
|
||||
([\w\-]*)\.(?P<_field>[\w\-.]*)
|
||||
```
|
||||
|
||||
with data
|
||||
|
||||
```text
|
||||
MSE_STORE.store-1-1.g_aio_running_bytes_write
|
||||
```
|
||||
|
||||
results in
|
||||
|
||||
```text
|
||||
varnish,section=MSE_STORE store-1-1.g_aio_running_bytes_write=5i
|
||||
```
|
||||
|
||||
The default regexps list can be extended in the telegraf config. The following
|
||||
example shows a config with a custom regexp for parsing of `accounting` VMOD
|
||||
metrics in `ACCG.<namespace>.<key>.<stat_name>` format. The namespace value will
|
||||
be used as a tag.
|
||||
|
||||
```toml
|
||||
[[inputs.varnish]]
|
||||
regexps = ['^ACCG.(?P<namespace>[\w-]*).(?P<_field>[\w-.]*)']
|
||||
```
|
||||
|
||||
## Custom arguments
|
||||
|
||||
You can change the default binary location and custom arguments for
|
||||
`varnishstat` and `varnishadm` command output. This is useful when running
|
||||
varnish in Docker or executing varnish via SSH on a different machine.
|
||||
|
||||
It's important to note that the `instance_name` parameter is not taken into account
|
||||
when using custom `binary_args` or `adm_binary_args`. You have to add `"-n",
|
||||
"/instance_name"` manually into configuration.
|
||||
|
||||
### Example for SSH
|
||||
|
||||
```toml
|
||||
[[inputs.varnish]]
|
||||
binary = "/usr/bin/ssh"
|
||||
binary_args = ["root@10.100.0.112", "varnishstat", "-n", "/var/lib/varnish/ubuntu", "-j"]
|
||||
adm_binary = "/usr/bin/ssh"
|
||||
adm_binary_args = ["root@10.100.0.112", "varnishadm", "-n", "/var/lib/varnish/ubuntu", "vcl.list", "-j"]
|
||||
metric_version = 2
|
||||
stats = ["*"]
|
||||
```
|
||||
|
||||
### Example for Docker
|
||||
|
||||
```toml
|
||||
[[inputs.varnish]]
|
||||
binary = "/usr/local/bin/docker"
|
||||
binary_args = ["exec", "-t", "container_name", "varnishstat", "-j"]
|
||||
adm_binary = "/usr/local/bin/docker"
|
||||
adm_binary_args = ["exec", "-t", "container_name", "varnishadm", "vcl.list", "-j"]
|
||||
metric_version = 2
|
||||
stats = ["*"]
|
||||
```
|
||||
|
||||
## Permissions
|
||||
|
||||
It's important to note that this plugin references `varnishstat` and
|
||||
`varnishadm`, which may require additional permissions to execute successfully.
|
||||
Depending on the user/group permissions of the telegraf user executing this
|
||||
plugin, you may need to alter the group membership, set facls, or use sudo.
|
||||
|
||||
### Group membership (Recommended)
|
||||
|
||||
```bash
|
||||
$ groups telegraf
|
||||
telegraf : telegraf
|
||||
|
||||
$ usermod -a -G varnish telegraf
|
||||
|
||||
$ groups telegraf
|
||||
telegraf : telegraf varnish
|
||||
```
|
||||
|
||||
### Extended filesystem ACL's
|
||||
|
||||
```bash
|
||||
$ getfacl /var/lib/varnish/<hostname>/_.vsm
|
||||
# file: var/lib/varnish/<hostname>/_.vsm
|
||||
# owner: root
|
||||
# group: root
|
||||
user::rw-
|
||||
group::r--
|
||||
other::---
|
||||
|
||||
$ setfacl -m u:telegraf:r /var/lib/varnish/<hostname>/_.vsm
|
||||
|
||||
$ getfacl /var/lib/varnish/<hostname>/_.vsm
|
||||
# file: var/lib/varnish/<hostname>/_.vsm
|
||||
# owner: root
|
||||
# group: root
|
||||
user::rw-
|
||||
user:telegraf:r--
|
||||
group::r--
|
||||
mask::r--
|
||||
other::---
|
||||
```
|
||||
|
||||
**Sudo privileges**:
|
||||
If you use this method, you will need the following in your telegraf config:
|
||||
|
||||
```toml
|
||||
[[inputs.varnish]]
|
||||
use_sudo = true
|
||||
```
|
||||
|
||||
You will also need to update your sudoers file:
|
||||
|
||||
```bash
|
||||
$ visudo
|
||||
# Add the following line:
|
||||
Cmnd_Alias VARNISHSTAT = /usr/bin/varnishstat
|
||||
telegraf ALL=(ALL) NOPASSWD: VARNISHSTAT
|
||||
Defaults!VARNISHSTAT !logfile, !syslog, !pam_session
|
||||
```
|
||||
|
||||
Please use the solution you see as most appropriate.
|
||||
|
||||
## Example Output
|
||||
|
||||
### metric_version = 1
|
||||
|
||||
```bash
|
||||
telegraf --config etc/telegraf.conf --input-filter varnish --test
|
||||
* Plugin: varnish, Collection 1
|
||||
> varnish,host=rpercy-VirtualBox,section=MAIN cache_hit=0i,cache_miss=0i,uptime=8416i 1462765437090957980
|
||||
```
|
||||
|
||||
### metric_version = 2
|
||||
|
||||
```bash
|
||||
telegraf --config etc/telegraf.conf --input-filter varnish --test
|
||||
> varnish,host=kozel.local,section=MAIN n_vampireobject=0i 1631121567000000000
|
||||
> varnish,backend=server_test1,host=kozel.local,section=VBE fail_eacces=0i 1631121567000000000
|
||||
> varnish,backend=default,host=kozel.local,section=VBE req=0i 1631121567000000000
|
||||
> varnish,host=kozel.local,section=MAIN client_req_400=0i 1631121567000000000
|
||||
> varnish,host=kozel.local,section=MAIN shm_cycles=10i 1631121567000000000
|
||||
> varnish,backend=default,host=kozel.local,section=VBE pipe_hdrbytes=0i 1631121567000000000
|
||||
```
|
||||
|
||||
You can merge metrics together into a single metric with multiple fields — the
|
||||
most memory- and network-transfer-efficient form — using `aggregators.merge`
|
||||
|
||||
```toml
|
||||
[[aggregators.merge]]
|
||||
drop_original = true
|
||||
```
|
||||
|
||||
The output will be:
|
||||
|
||||
```shell
|
||||
telegraf --config etc/telegraf.conf --input-filter varnish --test
|
||||
```
|
||||
|
||||
```text
|
||||
varnish,host=kozel.local,section=MAIN backend_busy=0i,backend_conn=19i,backend_fail=0i,backend_recycle=8i,backend_req=19i,backend_retry=0i,backend_reuse=0i,backend_unhealthy=0i,bans=1i,bans_added=1i,bans_completed=1i,bans_deleted=0i,bans_dups=0i,bans_lurker_contention=0i,bans_lurker_obj_killed=0i,bans_lurker_obj_killed_cutoff=0i,bans_lurker_tested=0i,bans_lurker_tests_tested=0i,bans_obj=0i,bans_obj_killed=0i,bans_persisted_bytes=16i,bans_persisted_fragmentation=0i,bans_req=0i,bans_tested=0i,bans_tests_tested=0i,busy_killed=0i,busy_sleep=0i,busy_wakeup=0i,cache_hit=643999i,cache_hit_grace=22i,cache_hitmiss=0i,cache_hitpass=0i,cache_miss=1i,client_req=644000i,client_req_400=0i,client_req_417=0i,client_resp_500=0i,esi_errors=0i,esi_warnings=0i,exp_mailed=37i,exp_received=37i,fetch_1xx=0i,fetch_204=0i,fetch_304=2i,fetch_bad=0i,fetch_chunked=6i,fetch_eof=0i,fetch_failed=0i,fetch_head=0i,fetch_length=11i,fetch_no_thread=0i,fetch_none=0i,hcb_insert=1i,hcb_lock=1i,hcb_nolock=644000i,losthdr=0i,n_backend=19i,n_expired=1i,n_gunzip=289204i,n_gzip=0i,n_lru_limited=0i,n_lru_moved=843i,n_lru_nuked=0i,n_obj_purged=0i,n_object=0i,n_objectcore=40i,n_objecthead=40i,n_purges=0i,n_test_gunzip=6i,n_vampireobject=0i,n_vcl=7i,n_vcl_avail=7i,n_vcl_discard=0i,pools=2i,req_dropped=0i,s_fetch=1i,s_pass=0i,s_pipe=0i,s_pipe_hdrbytes=0i,s_pipe_in=0i,s_pipe_out=0i,s_req_bodybytes=0i,s_req_hdrbytes=54740000i,s_resp_bodybytes=341618192i,s_resp_hdrbytes=190035576i,s_sess=651038i,s_synth=0i,sc_overload=0i,sc_pipe_overflow=0i,sc_range_short=0i,sc_rem_close=7038i,sc_req_close=0i,sc_req_http10=644000i,sc_req_http20=0i,sc_resp_close=0i,sc_rx_bad=0i,sc_rx_body=0i,sc_rx_junk=0i,sc_rx_overflow=0i,sc_rx_timeout=0i,sc_tx_eof=0i,sc_tx_error=0i,sc_tx_pipe=0i,sc_vcl_failure=0i,sess_closed=644000i,sess_closed_err=644000i,sess_conn=651038i,sess_drop=0i,sess_dropped=0i,sess_fail=0i,sess_fail_ebadf=0i,sess_fail_econnaborted=0i,sess_fail_eintr=0i,sess_fail_emfile=0i,sess_fail_enomem=0i,sess_fail_other=0i,sess_herd=11
i,sess_queued=0i,sess_readahead=0i,shm_cont=3572i,shm_cycles=10i,shm_flushes=0i,shm_records=30727866i,shm_writes=4661979i,summs=2225754i,thread_queue_len=0i,threads=200i,threads_created=200i,threads_destroyed=0i,threads_failed=0i,threads_limited=0i,uptime=4416326i,vcl_fail=0i,vmods=2i,ws_backend_overflow=0i,ws_client_overflow=0i,ws_session_overflow=0i,ws_thread_overflow=0i 1631121675000000000
|
||||
varnish,backend=default,host=kozel.local,section=VBE bereq_bodybytes=0i,bereq_hdrbytes=0i,beresp_bodybytes=0i,beresp_hdrbytes=0i,busy=0i,conn=0i,fail=0i,fail_eacces=0i,fail_eaddrnotavail=0i,fail_econnrefused=0i,fail_enetunreach=0i,fail_etimedout=0i,fail_other=0i,happy=9223372036854775807i,helddown=0i,pipe_hdrbytes=0i,pipe_in=0i,pipe_out=0i,req=0i,unhealthy=0i 1631121675000000000
|
||||
varnish,backend=server1,host=kozel.local,section=VBE bereq_bodybytes=0i,bereq_hdrbytes=0i,beresp_bodybytes=0i,beresp_hdrbytes=0i,busy=0i,conn=0i,fail=0i,fail_eacces=0i,fail_eaddrnotavail=0i,fail_econnrefused=30609i,fail_enetunreach=0i,fail_etimedout=0i,fail_other=0i,happy=0i,helddown=3i,pipe_hdrbytes=0i,pipe_in=0i,pipe_out=0i,req=0i,unhealthy=0i 1631121675000000000
|
||||
varnish,backend=server2,host=kozel.local,section=VBE bereq_bodybytes=0i,bereq_hdrbytes=0i,beresp_bodybytes=0i,beresp_hdrbytes=0i,busy=0i,conn=0i,fail=0i,fail_eacces=0i,fail_eaddrnotavail=0i,fail_econnrefused=30609i,fail_enetunreach=0i,fail_etimedout=0i,fail_other=0i,happy=0i,helddown=3i,pipe_hdrbytes=0i,pipe_in=0i,pipe_out=0i,req=0i,unhealthy=0i 1631121675000000000
|
||||
varnish,backend=server_test1,host=kozel.local,section=VBE bereq_bodybytes=0i,bereq_hdrbytes=0i,beresp_bodybytes=0i,beresp_hdrbytes=0i,busy=0i,conn=0i,fail=0i,fail_eacces=0i,fail_eaddrnotavail=0i,fail_econnrefused=49345i,fail_enetunreach=0i,fail_etimedout=0i,fail_other=0i,happy=0i,helddown=2i,pipe_hdrbytes=0i,pipe_in=0i,pipe_out=0i,req=0i,unhealthy=0i 1631121675000000000
|
||||
```
|
39
plugins/inputs/varnish/sample.conf
Normal file
39
plugins/inputs/varnish/sample.conf
Normal file
|
@ -0,0 +1,39 @@
|
|||
# A plugin to collect stats from Varnish HTTP Cache
|
||||
# This plugin ONLY supports non-Windows
|
||||
[[inputs.varnish]]
|
||||
## If running as a restricted user you can prepend sudo for additional access:
|
||||
#use_sudo = false
|
||||
|
||||
## The default location of the varnishstat binary can be overridden with:
|
||||
binary = "/usr/bin/varnishstat"
|
||||
|
||||
## Additional custom arguments for the varnishstat command
|
||||
# binary_args = ["-f", "MAIN.*"]
|
||||
|
||||
## The default location of the varnishadm binary can be overridden with:
|
||||
adm_binary = "/usr/bin/varnishadm"
|
||||
|
||||
## Custom arguments for the varnishadm command
|
||||
# adm_binary_args = [""]
|
||||
|
||||
## Metric version defaults to metric_version=1, use metric_version=2 for removal of nonactive vcls
|
||||
## Varnish 6.0.2 and newer is required for metric_version=2.
|
||||
metric_version = 1
|
||||
|
||||
## Additional regexps to override builtin conversion of varnish metrics into telegraf metrics.
|
||||
## Regexp group "_vcl" is used for extracting the VCL name. Metrics that contain nonactive VCL's are skipped.
|
||||
## Regexp group "_field" overrides the field name. Other named regexp groups are used as tags.
|
||||
# regexps = ['^XCNT\.(?P<_vcl>[\w\-]*)(\.)*(?P<group>[\w\-.+]*)\.(?P<_field>[\w\-.+]*)\.val']
|
||||
|
||||
## By default, telegraf gather stats for 3 metric points.
|
||||
## Setting stats will override the defaults shown below.
|
||||
## Glob matching can be used, ie, stats = ["MAIN.*"]
|
||||
## stats may also be set to ["*"], which will collect all stats
|
||||
stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"]
|
||||
|
||||
## Optional name for the varnish instance (or working directory) to query
|
||||
## Usually append after -n in varnish cli
|
||||
# instance_name = instanceName
|
||||
|
||||
## Timeout for varnishstat command
|
||||
# timeout = "1s"
|
1478
plugins/inputs/varnish/test_data/varnish4_4.json
Normal file
1478
plugins/inputs/varnish/test_data/varnish4_4.json
Normal file
File diff suppressed because it is too large
Load diff
2173
plugins/inputs/varnish/test_data/varnish6.2.1_reload.json
Normal file
2173
plugins/inputs/varnish/test_data/varnish6.2.1_reload.json
Normal file
File diff suppressed because it is too large
Load diff
2154
plugins/inputs/varnish/test_data/varnish6.6.json
Normal file
2154
plugins/inputs/varnish/test_data/varnish6.6.json
Normal file
File diff suppressed because it is too large
Load diff
24
plugins/inputs/varnish/test_data/varnish_types.json
Normal file
24
plugins/inputs/varnish/test_data/varnish_types.json
Normal file
|
@ -0,0 +1,24 @@
|
|||
{
|
||||
"version": 1,
|
||||
"timestamp": "2021-06-23T17:06:37",
|
||||
"counters": {
|
||||
"XXX.floatTest": {
|
||||
"description": "floatTest",
|
||||
"flag": "c",
|
||||
"format": "d",
|
||||
"value": 123.45
|
||||
},
|
||||
"XXX.stringTest": {
|
||||
"description": "stringTest",
|
||||
"flag": "c",
|
||||
"format": "d",
|
||||
"value": "abcdefg"
|
||||
},
|
||||
"XXX.intTest": {
|
||||
"description": "intTest",
|
||||
"flag": "c",
|
||||
"format": "d",
|
||||
"value": 12345
|
||||
}
|
||||
}
|
||||
}
|
474
plugins/inputs/varnish/test_data/varnish_v1_reload.txt
Normal file
474
plugins/inputs/varnish/test_data/varnish_v1_reload.txt
Normal file
|
@ -0,0 +1,474 @@
|
|||
MGT.uptime 326570 1.00 Management process uptime
|
||||
MGT.child_start 1 0.00 Child process started
|
||||
MGT.child_exit 0 0.00 Child process normal exit
|
||||
MGT.child_stop 0 0.00 Child process unexpected exit
|
||||
MGT.child_died 0 0.00 Child process died (signal)
|
||||
MGT.child_dump 0 0.00 Child process core dumped
|
||||
MGT.child_panic 0 0.00 Child process panic
|
||||
MAIN.summs 1773584 5.43 stat summ operations
|
||||
MAIN.uptime 326571 1.00 Child process uptime
|
||||
MAIN.sess_conn 651038 1.99 Sessions accepted
|
||||
MAIN.sess_drop 0 0.00 Sessions dropped
|
||||
MAIN.sess_fail 0 0.00 Session accept failures
|
||||
MAIN.sess_fail_econnaborted 0 0.00 Session accept failures: connection aborted
|
||||
MAIN.sess_fail_eintr 0 0.00 Session accept failures: interrupted system call
|
||||
MAIN.sess_fail_emfile 0 0.00 Session accept failures: too many open files
|
||||
MAIN.sess_fail_ebadf 0 0.00 Session accept failures: bad file descriptor
|
||||
MAIN.sess_fail_enomem 0 0.00 Session accept failures: not enough memory
|
||||
MAIN.sess_fail_other 0 0.00 Session accept failures: other
|
||||
MAIN.client_req_400 0 0.00 Client requests received, subject to 400 errors
|
||||
MAIN.client_req_417 0 0.00 Client requests received, subject to 417 errors
|
||||
MAIN.client_req 644000 1.97 Good client requests received
|
||||
MAIN.cache_hit 643999 1.97 Cache hits
|
||||
MAIN.cache_hit_grace 22 0.00 Cache grace hits
|
||||
MAIN.cache_hitpass 0 0.00 Cache hits for pass.
|
||||
MAIN.cache_hitmiss 0 0.00 Cache hits for miss.
|
||||
MAIN.cache_miss 1 0.00 Cache misses
|
||||
MAIN.backend_conn 19 0.00 Backend conn. success
|
||||
MAIN.backend_unhealthy 0 0.00 Backend conn. not attempted
|
||||
MAIN.backend_busy 0 0.00 Backend conn. too many
|
||||
MAIN.backend_fail 0 0.00 Backend conn. failures
|
||||
MAIN.backend_reuse 0 0.00 Backend conn. reuses
|
||||
MAIN.backend_recycle 8 0.00 Backend conn. recycles
|
||||
MAIN.backend_retry 0 0.00 Backend conn. retry
|
||||
MAIN.fetch_head 0 0.00 Fetch no body (HEAD)
|
||||
MAIN.fetch_length 11 0.00 Fetch with Length
|
||||
MAIN.fetch_chunked 6 0.00 Fetch chunked
|
||||
MAIN.fetch_eof 0 0.00 Fetch EOF
|
||||
MAIN.fetch_bad 0 0.00 Fetch bad T-E
|
||||
MAIN.fetch_none 0 0.00 Fetch no body
|
||||
MAIN.fetch_1xx 0 0.00 Fetch no body (1xx)
|
||||
MAIN.fetch_204 0 0.00 Fetch no body (204)
|
||||
MAIN.fetch_304 2 0.00 Fetch no body (304)
|
||||
MAIN.fetch_failed 0 0.00 Fetch failed (all causes)
|
||||
MAIN.fetch_no_thread 0 0.00 Fetch failed (no thread)
|
||||
MAIN.pools 2 . Number of thread pools
|
||||
MAIN.threads 200 . Total number of threads
|
||||
MAIN.threads_limited 0 0.00 Threads hit max
|
||||
MAIN.threads_created 200 0.00 Threads created
|
||||
MAIN.threads_destroyed 0 0.00 Threads destroyed
|
||||
MAIN.threads_failed 0 0.00 Thread creation failed
|
||||
MAIN.thread_queue_len 0 . Length of session queue
|
||||
MAIN.busy_sleep 0 0.00 Number of requests sent to sleep on busy objhdr
|
||||
MAIN.busy_wakeup 0 0.00 Number of requests woken after sleep on busy objhdr
|
||||
MAIN.busy_killed 0 0.00 Number of requests killed after sleep on busy objhdr
|
||||
MAIN.sess_queued 0 0.00 Sessions queued for thread
|
||||
MAIN.sess_dropped 0 0.00 Sessions dropped for thread
|
||||
MAIN.req_dropped 0 0.00 Requests dropped
|
||||
MAIN.n_object 0 . object structs made
|
||||
MAIN.n_vampireobject 0 . unresurrected objects
|
||||
MAIN.n_objectcore 40 . objectcore structs made
|
||||
MAIN.n_objecthead 40 . objecthead structs made
|
||||
MAIN.n_backend 19 . Number of backends
|
||||
MAIN.n_expired 1 0.00 Number of expired objects
|
||||
MAIN.n_lru_nuked 0 0.00 Number of LRU nuked objects
|
||||
MAIN.n_lru_moved 843 0.00 Number of LRU moved objects
|
||||
MAIN.n_lru_limited 0 0.00 Reached nuke_limit
|
||||
MAIN.losthdr 0 0.00 HTTP header overflows
|
||||
MAIN.s_sess 651038 1.99 Total sessions seen
|
||||
MAIN.s_pipe 0 0.00 Total pipe sessions seen
|
||||
MAIN.s_pass 0 0.00 Total pass-ed requests seen
|
||||
MAIN.s_fetch 1 0.00 Total backend fetches initiated
|
||||
MAIN.s_synth 0 0.00 Total synthetic responses made
|
||||
MAIN.s_req_hdrbytes 54740000 167.62 Request header bytes
|
||||
MAIN.s_req_bodybytes 0 0.00 Request body bytes
|
||||
MAIN.s_resp_hdrbytes 190035576 581.91 Response header bytes
|
||||
MAIN.s_resp_bodybytes 341618192 1046.08 Response body bytes
|
||||
MAIN.s_pipe_hdrbytes 0 0.00 Pipe request header bytes
|
||||
MAIN.s_pipe_in 0 0.00 Piped bytes from client
|
||||
MAIN.s_pipe_out 0 0.00 Piped bytes to client
|
||||
MAIN.sess_closed 644000 1.97 Session Closed
|
||||
MAIN.sess_closed_err 644000 1.97 Session Closed with error
|
||||
MAIN.sess_readahead 0 0.00 Session Read Ahead
|
||||
MAIN.sess_herd 11 0.00 Session herd
|
||||
MAIN.sc_rem_close 7038 0.02 Session OK REM_CLOSE
|
||||
MAIN.sc_req_close 0 0.00 Session OK REQ_CLOSE
|
||||
MAIN.sc_req_http10 644000 1.97 Session Err REQ_HTTP10
|
||||
MAIN.sc_rx_bad 0 0.00 Session Err RX_BAD
|
||||
MAIN.sc_rx_body 0 0.00 Session Err RX_BODY
|
||||
MAIN.sc_rx_junk 0 0.00 Session Err RX_JUNK
|
||||
MAIN.sc_rx_overflow 0 0.00 Session Err RX_OVERFLOW
|
||||
MAIN.sc_rx_timeout 0 0.00 Session Err RX_TIMEOUT
|
||||
MAIN.sc_tx_pipe 0 0.00 Session OK TX_PIPE
|
||||
MAIN.sc_tx_error 0 0.00 Session Err TX_ERROR
|
||||
MAIN.sc_tx_eof 0 0.00 Session OK TX_EOF
|
||||
MAIN.sc_resp_close 0 0.00 Session OK RESP_CLOSE
|
||||
MAIN.sc_overload 0 0.00 Session Err OVERLOAD
|
||||
MAIN.sc_pipe_overflow 0 0.00 Session Err PIPE_OVERFLOW
|
||||
MAIN.sc_range_short 0 0.00 Session Err RANGE_SHORT
|
||||
MAIN.sc_req_http20 0 0.00 Session Err REQ_HTTP20
|
||||
MAIN.sc_vcl_failure 0 0.00 Session Err VCL_FAILURE
|
||||
MAIN.client_resp_500 0 0.00 Delivery failed due to insufficient workspace.
|
||||
MAIN.ws_backend_overflow 0 0.00 workspace_backend overflows
|
||||
MAIN.ws_client_overflow 0 0.00 workspace_client overflows
|
||||
MAIN.ws_thread_overflow 0 0.00 workspace_thread overflows
|
||||
MAIN.ws_session_overflow 0 0.00 workspace_session overflows
|
||||
MAIN.shm_records 30395363 93.07 SHM records
|
||||
MAIN.shm_writes 4329476 13.26 SHM writes
|
||||
MAIN.shm_flushes 0 0.00 SHM flushes due to overflow
|
||||
MAIN.shm_cont 3572 0.01 SHM MTX contention
|
||||
MAIN.shm_cycles 10 0.00 SHM cycles through buffer
|
||||
MAIN.backend_req 19 0.00 Backend requests made
|
||||
MAIN.n_vcl 7 . Number of loaded VCLs in total
|
||||
MAIN.n_vcl_avail 7 . Number of VCLs available
|
||||
MAIN.n_vcl_discard 0 . Number of discarded VCLs
|
||||
MAIN.vcl_fail 0 0.00 VCL failures
|
||||
MAIN.bans 1 . Count of bans
|
||||
MAIN.bans_completed 1 . Number of bans marked 'completed'
|
||||
MAIN.bans_obj 0 . Number of bans using obj.*
|
||||
MAIN.bans_req 0 . Number of bans using req.*
|
||||
MAIN.bans_added 1 0.00 Bans added
|
||||
MAIN.bans_deleted 0 0.00 Bans deleted
|
||||
MAIN.bans_tested 0 0.00 Bans tested against objects (lookup)
|
||||
MAIN.bans_obj_killed 0 0.00 Objects killed by bans (lookup)
|
||||
MAIN.bans_lurker_tested 0 0.00 Bans tested against objects (lurker)
|
||||
MAIN.bans_tests_tested 0 0.00 Ban tests tested against objects (lookup)
|
||||
MAIN.bans_lurker_tests_tested 0 0.00 Ban tests tested against objects (lurker)
|
||||
MAIN.bans_lurker_obj_killed 0 0.00 Objects killed by bans (lurker)
|
||||
MAIN.bans_lurker_obj_killed_cutoff 0 0.00 Objects killed by bans for cutoff (lurker)
|
||||
MAIN.bans_dups 0 0.00 Bans superseded by other bans
|
||||
MAIN.bans_lurker_contention 0 0.00 Lurker gave way for lookup
|
||||
MAIN.bans_persisted_bytes 16 . Bytes used by the persisted ban lists
|
||||
MAIN.bans_persisted_fragmentation 0 . Extra bytes in persisted ban lists due to fragmentation
|
||||
MAIN.n_purges 0 0.00 Number of purge operations executed
|
||||
MAIN.n_obj_purged 0 0.00 Number of purged objects
|
||||
MAIN.exp_mailed 37 0.00 Number of objects mailed to expiry thread
|
||||
MAIN.exp_received 37 0.00 Number of objects received by expiry thread
|
||||
MAIN.hcb_nolock 644000 1.97 HCB Lookups without lock
|
||||
MAIN.hcb_lock 1 0.00 HCB Lookups with lock
|
||||
MAIN.hcb_insert 1 0.00 HCB Inserts
|
||||
MAIN.esi_errors 0 0.00 ESI parse errors (unlock)
|
||||
MAIN.esi_warnings 0 0.00 ESI parse warnings (unlock)
|
||||
MAIN.vmods 2 . Loaded VMODs
|
||||
MAIN.n_gzip 0 0.00 Gzip operations
|
||||
MAIN.n_gunzip 289204 0.89 Gunzip operations
|
||||
MAIN.n_test_gunzip 6 0.00 Test gunzip operations
|
||||
LCK.backend.creat 20 0.00 Created locks
|
||||
LCK.backend.destroy 0 0.00 Destroyed locks
|
||||
LCK.backend.locks 707323 2.17 Lock Operations
|
||||
LCK.backend.dbg_busy 0 0.00 Contended lock operations
|
||||
LCK.backend.dbg_try_fail 0 0.00 Contended trylock operations
|
||||
LCK.ban.creat 1 0.00 Created locks
|
||||
LCK.ban.destroy 0 0.00 Destroyed locks
|
||||
LCK.ban.locks 10688 0.03 Lock Operations
|
||||
LCK.ban.dbg_busy 0 0.00 Contended lock operations
|
||||
LCK.ban.dbg_try_fail 0 0.00 Contended trylock operations
|
||||
LCK.busyobj.creat 59 0.00 Created locks
|
||||
LCK.busyobj.destroy 19 0.00 Destroyed locks
|
||||
LCK.busyobj.locks 139 0.00 Lock Operations
|
||||
LCK.busyobj.dbg_busy 0 0.00 Contended lock operations
|
||||
LCK.busyobj.dbg_try_fail 0 0.00 Contended trylock operations
|
||||
LCK.cli.creat 1 0.00 Created locks
|
||||
LCK.cli.destroy 0 0.00 Destroyed locks
|
||||
LCK.cli.locks 100758 0.31 Lock Operations
|
||||
LCK.cli.dbg_busy 0 0.00 Contended lock operations
|
||||
LCK.cli.dbg_try_fail 0 0.00 Contended trylock operations
|
||||
LCK.exp.creat 1 0.00 Created locks
|
||||
LCK.exp.destroy 0 0.00 Destroyed locks
|
||||
LCK.exp.locks 83338 0.26 Lock Operations
|
||||
LCK.exp.dbg_busy 0 0.00 Contended lock operations
|
||||
LCK.exp.dbg_try_fail 0 0.00 Contended trylock operations
|
||||
LCK.hcb.creat 1 0.00 Created locks
|
||||
LCK.hcb.destroy 0 0.00 Destroyed locks
|
||||
LCK.hcb.locks 1468 0.00 Lock Operations
|
||||
LCK.hcb.dbg_busy 0 0.00 Contended lock operations
|
||||
LCK.hcb.dbg_try_fail 0 0.00 Contended trylock operations
|
||||
LCK.lru.creat 2 0.00 Created locks
|
||||
LCK.lru.destroy 0 0.00 Destroyed locks
|
||||
LCK.lru.locks 881 0.00 Lock Operations
|
||||
LCK.lru.dbg_busy 0 0.00 Contended lock operations
|
||||
LCK.lru.dbg_try_fail 0 0.00 Contended trylock operations
|
||||
LCK.mempool.creat 5 0.00 Created locks
|
||||
LCK.mempool.destroy 0 0.00 Destroyed locks
|
||||
LCK.mempool.locks 3772135 11.55 Lock Operations
|
||||
LCK.mempool.dbg_busy 0 0.00 Contended lock operations
|
||||
LCK.mempool.dbg_try_fail 0 0.00 Contended trylock operations
|
||||
LCK.objhdr.creat 42 0.00 Created locks
|
||||
LCK.objhdr.destroy 1 0.00 Destroyed locks
|
||||
LCK.objhdr.locks 1288225 3.94 Lock Operations
|
||||
LCK.objhdr.dbg_busy 0 0.00 Contended lock operations
|
||||
LCK.objhdr.dbg_try_fail 0 0.00 Contended trylock operations
|
||||
LCK.pipestat.creat 1 0.00 Created locks
|
||||
LCK.pipestat.destroy 0 0.00 Destroyed locks
|
||||
LCK.pipestat.locks 0 0.00 Lock Operations
|
||||
LCK.pipestat.dbg_busy 0 0.00 Contended lock operations
|
||||
LCK.pipestat.dbg_try_fail 0 0.00 Contended trylock operations
|
||||
LCK.sess.creat 651038 1.99 Created locks
|
||||
LCK.sess.destroy 651038 1.99 Destroyed locks
|
||||
LCK.sess.locks 651076 1.99 Lock Operations
|
||||
LCK.sess.dbg_busy 0 0.00 Contended lock operations
|
||||
LCK.sess.dbg_try_fail 0 0.00 Contended trylock operations
|
||||
LCK.tcp_pool.creat 5 0.00 Created locks
|
||||
LCK.tcp_pool.destroy 0 0.00 Destroyed locks
|
||||
LCK.tcp_pool.locks 358117 1.10 Lock Operations
|
||||
LCK.tcp_pool.dbg_busy 0 0.00 Contended lock operations
|
||||
LCK.tcp_pool.dbg_try_fail 0 0.00 Contended trylock operations
|
||||
LCK.vbe.creat 1 0.00 Created locks
|
||||
LCK.vbe.destroy 0 0.00 Destroyed locks
|
||||
LCK.vbe.locks 336547 1.03 Lock Operations
|
||||
LCK.vbe.dbg_busy 0 0.00 Contended lock operations
|
||||
LCK.vbe.dbg_try_fail 0 0.00 Contended trylock operations
|
||||
LCK.vcapace.creat 1 0.00 Created locks
|
||||
LCK.vcapace.destroy 0 0.00 Destroyed locks
|
||||
LCK.vcapace.locks 0 0.00 Lock Operations
|
||||
LCK.vcapace.dbg_busy 0 0.00 Contended lock operations
|
||||
LCK.vcapace.dbg_try_fail 0 0.00 Contended trylock operations
|
||||
LCK.vcl.creat 1 0.00 Created locks
|
||||
LCK.vcl.destroy 0 0.00 Destroyed locks
|
||||
LCK.vcl.locks 398 0.00 Lock Operations
|
||||
LCK.vcl.dbg_busy 0 0.00 Contended lock operations
|
||||
LCK.vcl.dbg_try_fail 0 0.00 Contended trylock operations
|
||||
LCK.vxid.creat 1 0.00 Created locks
|
||||
LCK.vxid.destroy 0 0.00 Destroyed locks
|
||||
LCK.vxid.locks 60 0.00 Lock Operations
|
||||
LCK.vxid.dbg_busy 0 0.00 Contended lock operations
|
||||
LCK.vxid.dbg_try_fail 0 0.00 Contended trylock operations
|
||||
LCK.waiter.creat 2 0.00 Created locks
|
||||
LCK.waiter.destroy 0 0.00 Destroyed locks
|
||||
LCK.waiter.locks 5323 0.02 Lock Operations
|
||||
LCK.waiter.dbg_busy 0 0.00 Contended lock operations
|
||||
LCK.waiter.dbg_try_fail 0 0.00 Contended trylock operations
|
||||
LCK.wq.creat 3 0.00 Created locks
|
||||
LCK.wq.destroy 0 0.00 Destroyed locks
|
||||
LCK.wq.locks 3161556 9.68 Lock Operations
|
||||
LCK.wq.dbg_busy 0 0.00 Contended lock operations
|
||||
LCK.wq.dbg_try_fail 0 0.00 Contended trylock operations
|
||||
LCK.wstat.creat 1 0.00 Created locks
|
||||
LCK.wstat.destroy 0 0.00 Destroyed locks
|
||||
LCK.wstat.locks 976543 2.99 Lock Operations
|
||||
LCK.wstat.dbg_busy 0 0.00 Contended lock operations
|
||||
LCK.wstat.dbg_try_fail 0 0.00 Contended trylock operations
|
||||
MEMPOOL.busyobj.live 0 . In use
|
||||
MEMPOOL.busyobj.pool 10 . In Pool
|
||||
MEMPOOL.busyobj.sz_wanted 65536 . Size requested
|
||||
MEMPOOL.busyobj.sz_actual 65504 . Size allocated
|
||||
MEMPOOL.busyobj.allocs 19 0.00 Allocations
|
||||
MEMPOOL.busyobj.frees 19 0.00 Frees
|
||||
MEMPOOL.busyobj.recycle 19 0.00 Recycled from pool
|
||||
MEMPOOL.busyobj.timeout 0 0.00 Timed out from pool
|
||||
MEMPOOL.busyobj.toosmall 0 0.00 Too small to recycle
|
||||
MEMPOOL.busyobj.surplus 0 0.00 Too many for pool
|
||||
MEMPOOL.busyobj.randry 0 0.00 Pool ran dry
|
||||
MEMPOOL.req0.live 0 . In use
|
||||
MEMPOOL.req0.pool 10 . In Pool
|
||||
MEMPOOL.req0.sz_wanted 65536 . Size requested
|
||||
MEMPOOL.req0.sz_actual 65504 . Size allocated
|
||||
MEMPOOL.req0.allocs 326920 1.00 Allocations
|
||||
MEMPOOL.req0.frees 326920 1.00 Frees
|
||||
MEMPOOL.req0.recycle 326833 1.00 Recycled from pool
|
||||
MEMPOOL.req0.timeout 138 0.00 Timed out from pool
|
||||
MEMPOOL.req0.toosmall 0 0.00 Too small to recycle
|
||||
MEMPOOL.req0.surplus 0 0.00 Too many for pool
|
||||
MEMPOOL.req0.randry 87 0.00 Pool ran dry
|
||||
MEMPOOL.sess0.live 0 . In use
|
||||
MEMPOOL.sess0.pool 10 . In Pool
|
||||
MEMPOOL.sess0.sz_wanted 512 . Size requested
|
||||
MEMPOOL.sess0.sz_actual 480 . Size allocated
|
||||
MEMPOOL.sess0.allocs 326920 1.00 Allocations
|
||||
MEMPOOL.sess0.frees 326920 1.00 Frees
|
||||
MEMPOOL.sess0.recycle 326764 1.00 Recycled from pool
|
||||
MEMPOOL.sess0.timeout 201 0.00 Timed out from pool
|
||||
MEMPOOL.sess0.toosmall 0 0.00 Too small to recycle
|
||||
MEMPOOL.sess0.surplus 0 0.00 Too many for pool
|
||||
MEMPOOL.sess0.randry 156 0.00 Pool ran dry
|
||||
LCK.sma.creat 2 0.00 Created locks
|
||||
LCK.sma.destroy 0 0.00 Destroyed locks
|
||||
LCK.sma.locks 88 0.00 Lock Operations
|
||||
LCK.sma.dbg_busy 0 0.00 Contended lock operations
|
||||
LCK.sma.dbg_try_fail 0 0.00 Contended trylock operations
|
||||
SMA.s0.c_req 44 0.00 Allocator requests
|
||||
SMA.s0.c_fail 0 0.00 Allocator failures
|
||||
SMA.s0.c_bytes 112568 0.34 Bytes allocated
|
||||
SMA.s0.c_freed 112568 0.34 Bytes freed
|
||||
SMA.s0.g_alloc 0 . Allocations outstanding
|
||||
SMA.s0.g_bytes 0 . Bytes outstanding
|
||||
SMA.s0.g_space 268435456 . Bytes available
|
||||
SMA.Transient.c_req 0 0.00 Allocator requests
|
||||
SMA.Transient.c_fail 0 0.00 Allocator failures
|
||||
SMA.Transient.c_bytes 0 0.00 Bytes allocated
|
||||
SMA.Transient.c_freed 0 0.00 Bytes freed
|
||||
SMA.Transient.g_alloc 0 . Allocations outstanding
|
||||
SMA.Transient.g_bytes 0 . Bytes outstanding
|
||||
SMA.Transient.g_space 0 . Bytes available
|
||||
MEMPOOL.req1.live 0 . In use
|
||||
MEMPOOL.req1.pool 10 . In Pool
|
||||
MEMPOOL.req1.sz_wanted 65536 . Size requested
|
||||
MEMPOOL.req1.sz_actual 65504 . Size allocated
|
||||
MEMPOOL.req1.allocs 324129 0.99 Allocations
|
||||
MEMPOOL.req1.frees 324129 0.99 Frees
|
||||
MEMPOOL.req1.recycle 324018 0.99 Recycled from pool
|
||||
MEMPOOL.req1.timeout 165 0.00 Timed out from pool
|
||||
MEMPOOL.req1.toosmall 0 0.00 Too small to recycle
|
||||
MEMPOOL.req1.surplus 0 0.00 Too many for pool
|
||||
MEMPOOL.req1.randry 111 0.00 Pool ran dry
|
||||
MEMPOOL.sess1.live 0 . In use
|
||||
MEMPOOL.sess1.pool 10 . In Pool
|
||||
MEMPOOL.sess1.sz_wanted 512 . Size requested
|
||||
MEMPOOL.sess1.sz_actual 480 . Size allocated
|
||||
MEMPOOL.sess1.allocs 324118 0.99 Allocations
|
||||
MEMPOOL.sess1.frees 324118 0.99 Frees
|
||||
MEMPOOL.sess1.recycle 323926 0.99 Recycled from pool
|
||||
MEMPOOL.sess1.timeout 242 0.00 Timed out from pool
|
||||
MEMPOOL.sess1.toosmall 0 0.00 Too small to recycle
|
||||
MEMPOOL.sess1.surplus 0 0.00 Too many for pool
|
||||
MEMPOOL.sess1.randry 192 0.00 Pool ran dry
|
||||
VBE.reload_20210722_162225_1979744.server_test1.happy 0 . Happy health probes
|
||||
VBE.reload_20210722_162225_1979744.server_test1.bereq_hdrbytes 0 0.00 Request header bytes
|
||||
VBE.reload_20210722_162225_1979744.server_test1.bereq_bodybytes 0 0.00 Request body bytes
|
||||
VBE.reload_20210722_162225_1979744.server_test1.beresp_hdrbytes 0 0.00 Response header bytes
|
||||
VBE.reload_20210722_162225_1979744.server_test1.beresp_bodybytes 0 0.00 Response body bytes
|
||||
VBE.reload_20210722_162225_1979744.server_test1.pipe_hdrbytes 0 0.00 Pipe request header bytes
|
||||
VBE.reload_20210722_162225_1979744.server_test1.pipe_out 0 0.00 Piped bytes to backend
|
||||
VBE.reload_20210722_162225_1979744.server_test1.pipe_in 0 0.00 Piped bytes from backend
|
||||
VBE.reload_20210722_162225_1979744.server_test1.conn 0 . Concurrent connections to backend
|
||||
VBE.reload_20210722_162225_1979744.server_test1.req 0 0.00 Backend requests sent
|
||||
VBE.reload_20210722_162225_1979744.server_test1.unhealthy 0 0.00 Fetches not attempted due to backend being unhealthy
|
||||
VBE.reload_20210722_162225_1979744.server_test1.busy 0 0.00 Fetches not attempted due to backend being busy
|
||||
VBE.reload_20210722_162225_1979744.server_test1.fail 0 0.00 Connections failed
|
||||
VBE.reload_20210722_162225_1979744.server_test1.fail_eacces 0 0.00 Connections failed with EACCES or EPERM
|
||||
VBE.reload_20210722_162225_1979744.server_test1.fail_eaddrnotavail 0 0.00 Connections failed with EADDRNOTAVAIL
|
||||
VBE.reload_20210722_162225_1979744.server_test1.fail_econnrefused 15037 0.05 Connections failed with ECONNREFUSED
|
||||
VBE.reload_20210722_162225_1979744.server_test1.fail_enetunreach 0 0.00 Connections failed with ENETUNREACH
|
||||
VBE.reload_20210722_162225_1979744.server_test1.fail_etimedout 0 0.00 Connections failed ETIMEDOUT
|
||||
VBE.reload_20210722_162225_1979744.server_test1.fail_other 0 0.00 Connections failed for other reason
|
||||
VBE.reload_20210722_162225_1979744.server_test1.helddown 0 0.00 Connection opens not attempted
|
||||
VBE.reload_20210722_162225_1979744.default.happy 18446744073709551615 . Happy health probes
|
||||
VBE.reload_20210722_162225_1979744.default.bereq_hdrbytes 0 0.00 Request header bytes
|
||||
VBE.reload_20210722_162225_1979744.default.bereq_bodybytes 0 0.00 Request body bytes
|
||||
VBE.reload_20210722_162225_1979744.default.beresp_hdrbytes 0 0.00 Response header bytes
|
||||
VBE.reload_20210722_162225_1979744.default.beresp_bodybytes 0 0.00 Response body bytes
|
||||
VBE.reload_20210722_162225_1979744.default.pipe_hdrbytes 0 0.00 Pipe request header bytes
|
||||
VBE.reload_20210722_162225_1979744.default.pipe_out 0 0.00 Piped bytes to backend
|
||||
VBE.reload_20210722_162225_1979744.default.pipe_in 0 0.00 Piped bytes from backend
|
||||
VBE.reload_20210722_162225_1979744.default.conn 0 . Concurrent connections to backend
|
||||
VBE.reload_20210722_162225_1979744.default.req 0 0.00 Backend requests sent
|
||||
VBE.reload_20210722_162225_1979744.default.unhealthy 0 0.00 Fetches not attempted due to backend being unhealthy
|
||||
VBE.reload_20210722_162225_1979744.default.busy 0 0.00 Fetches not attempted due to backend being busy
|
||||
VBE.reload_20210722_162225_1979744.default.fail 0 0.00 Connections failed
|
||||
VBE.reload_20210722_162225_1979744.default.fail_eacces 0 0.00 Connections failed with EACCES or EPERM
|
||||
VBE.reload_20210722_162225_1979744.default.fail_eaddrnotavail 0 0.00 Connections failed with EADDRNOTAVAIL
|
||||
VBE.reload_20210722_162225_1979744.default.fail_econnrefused 0 0.00 Connections failed with ECONNREFUSED
|
||||
VBE.reload_20210722_162225_1979744.default.fail_enetunreach 0 0.00 Connections failed with ENETUNREACH
|
||||
VBE.reload_20210722_162225_1979744.default.fail_etimedout 0 0.00 Connections failed ETIMEDOUT
|
||||
VBE.reload_20210722_162225_1979744.default.fail_other 0 0.00 Connections failed for other reason
|
||||
VBE.reload_20210722_162225_1979744.default.helddown 0 0.00 Connection opens not attempted
|
||||
VBE.reload_20210722_162225_1979744.server1.happy 0 . Happy health probes
|
||||
VBE.reload_20210722_162225_1979744.server1.bereq_hdrbytes 0 0.00 Request header bytes
|
||||
VBE.reload_20210722_162225_1979744.server1.bereq_bodybytes 0 0.00 Request body bytes
|
||||
VBE.reload_20210722_162225_1979744.server1.beresp_hdrbytes 0 0.00 Response header bytes
|
||||
VBE.reload_20210722_162225_1979744.server1.beresp_bodybytes 0 0.00 Response body bytes
|
||||
VBE.reload_20210722_162225_1979744.server1.pipe_hdrbytes 0 0.00 Pipe request header bytes
|
||||
VBE.reload_20210722_162225_1979744.server1.pipe_out 0 0.00 Piped bytes to backend
|
||||
VBE.reload_20210722_162225_1979744.server1.pipe_in 0 0.00 Piped bytes from backend
|
||||
VBE.reload_20210722_162225_1979744.server1.conn 0 . Concurrent connections to backend
|
||||
VBE.reload_20210722_162225_1979744.server1.req 0 0.00 Backend requests sent
|
||||
VBE.reload_20210722_162225_1979744.server1.unhealthy 0 0.00 Fetches not attempted due to backend being unhealthy
|
||||
VBE.reload_20210722_162225_1979744.server1.busy 0 0.00 Fetches not attempted due to backend being busy
|
||||
VBE.reload_20210722_162225_1979744.server1.fail 0 0.00 Connections failed
|
||||
VBE.reload_20210722_162225_1979744.server1.fail_eacces 0 0.00 Connections failed with EACCES or EPERM
|
||||
VBE.reload_20210722_162225_1979744.server1.fail_eaddrnotavail 0 0.00 Connections failed with EADDRNOTAVAIL
|
||||
VBE.reload_20210722_162225_1979744.server1.fail_econnrefused 9471 0.03 Connections failed with ECONNREFUSED
|
||||
VBE.reload_20210722_162225_1979744.server1.fail_enetunreach 0 0.00 Connections failed with ENETUNREACH
|
||||
VBE.reload_20210722_162225_1979744.server1.fail_etimedout 0 0.00 Connections failed ETIMEDOUT
|
||||
VBE.reload_20210722_162225_1979744.server1.fail_other 0 0.00 Connections failed for other reason
|
||||
VBE.reload_20210722_162225_1979744.server1.helddown 4 0.00 Connection opens not attempted
|
||||
VBE.reload_20210722_162225_1979744.server2.happy 0 . Happy health probes
|
||||
VBE.reload_20210722_162225_1979744.server2.bereq_hdrbytes 0 0.00 Request header bytes
|
||||
VBE.reload_20210722_162225_1979744.server2.bereq_bodybytes 0 0.00 Request body bytes
|
||||
VBE.reload_20210722_162225_1979744.server2.beresp_hdrbytes 0 0.00 Response header bytes
|
||||
VBE.reload_20210722_162225_1979744.server2.beresp_bodybytes 0 0.00 Response body bytes
|
||||
VBE.reload_20210722_162225_1979744.server2.pipe_hdrbytes 0 0.00 Pipe request header bytes
|
||||
VBE.reload_20210722_162225_1979744.server2.pipe_out 0 0.00 Piped bytes to backend
|
||||
VBE.reload_20210722_162225_1979744.server2.pipe_in 0 0.00 Piped bytes from backend
|
||||
VBE.reload_20210722_162225_1979744.server2.conn 0 . Concurrent connections to backend
|
||||
VBE.reload_20210722_162225_1979744.server2.req 0 0.00 Backend requests sent
|
||||
VBE.reload_20210722_162225_1979744.server2.unhealthy 0 0.00 Fetches not attempted due to backend being unhealthy
|
||||
VBE.reload_20210722_162225_1979744.server2.busy 0 0.00 Fetches not attempted due to backend being busy
|
||||
VBE.reload_20210722_162225_1979744.server2.fail 0 0.00 Connections failed
|
||||
VBE.reload_20210722_162225_1979744.server2.fail_eacces 0 0.00 Connections failed with EACCES or EPERM
|
||||
VBE.reload_20210722_162225_1979744.server2.fail_eaddrnotavail 0 0.00 Connections failed with EADDRNOTAVAIL
|
||||
VBE.reload_20210722_162225_1979744.server2.fail_econnrefused 9471 0.03 Connections failed with ECONNREFUSED
|
||||
VBE.reload_20210722_162225_1979744.server2.fail_enetunreach 0 0.00 Connections failed with ENETUNREACH
|
||||
VBE.reload_20210722_162225_1979744.server2.fail_etimedout 0 0.00 Connections failed ETIMEDOUT
|
||||
VBE.reload_20210722_162225_1979744.server2.fail_other 0 0.00 Connections failed for other reason
|
||||
VBE.reload_20210722_162225_1979744.server2.helddown 4 0.00 Connection opens not attempted
|
||||
VBE.reload_20210723_091821_2056185.server_test1.happy 64 . Happy health probes
|
||||
VBE.reload_20210723_091821_2056185.server_test1.bereq_hdrbytes 0 0.00 Request header bytes
|
||||
VBE.reload_20210723_091821_2056185.server_test1.bereq_bodybytes 0 0.00 Request body bytes
|
||||
VBE.reload_20210723_091821_2056185.server_test1.beresp_hdrbytes 0 0.00 Response header bytes
|
||||
VBE.reload_20210723_091821_2056185.server_test1.beresp_bodybytes 0 0.00 Response body bytes
|
||||
VBE.reload_20210723_091821_2056185.server_test1.pipe_hdrbytes 0 0.00 Pipe request header bytes
|
||||
VBE.reload_20210723_091821_2056185.server_test1.pipe_out 0 0.00 Piped bytes to backend
|
||||
VBE.reload_20210723_091821_2056185.server_test1.pipe_in 0 0.00 Piped bytes from backend
|
||||
VBE.reload_20210723_091821_2056185.server_test1.conn 0 . Concurrent connections to backend
|
||||
VBE.reload_20210723_091821_2056185.server_test1.req 0 0.00 Backend requests sent
|
||||
VBE.reload_20210723_091821_2056185.server_test1.unhealthy 0 0.00 Fetches not attempted due to backend being unhealthy
|
||||
VBE.reload_20210723_091821_2056185.server_test1.busy 0 0.00 Fetches not attempted due to backend being busy
|
||||
VBE.reload_20210723_091821_2056185.server_test1.fail 0 0.00 Connections failed
|
||||
VBE.reload_20210723_091821_2056185.server_test1.fail_eacces 0 0.00 Connections failed with EACCES or EPERM
|
||||
VBE.reload_20210723_091821_2056185.server_test1.fail_eaddrnotavail 0 0.00 Connections failed with EADDRNOTAVAIL
|
||||
VBE.reload_20210723_091821_2056185.server_test1.fail_econnrefused 6 0.00 Connections failed with ECONNREFUSED
|
||||
VBE.reload_20210723_091821_2056185.server_test1.fail_enetunreach 0 0.00 Connections failed with ENETUNREACH
|
||||
VBE.reload_20210723_091821_2056185.server_test1.fail_etimedout 0 0.00 Connections failed ETIMEDOUT
|
||||
VBE.reload_20210723_091821_2056185.server_test1.fail_other 0 0.00 Connections failed for other reason
|
||||
VBE.reload_20210723_091821_2056185.server_test1.helddown 0 0.00 Connection opens not attempted
|
||||
VBE.reload_20210723_091821_2056185.default.happy 63 . Happy health probes
|
||||
VBE.reload_20210723_091821_2056185.default.bereq_hdrbytes 0 0.00 Request header bytes
|
||||
VBE.reload_20210723_091821_2056185.default.bereq_bodybytes 0 0.00 Request body bytes
|
||||
VBE.reload_20210723_091821_2056185.default.beresp_hdrbytes 0 0.00 Response header bytes
|
||||
VBE.reload_20210723_091821_2056185.default.beresp_bodybytes 0 0.00 Response body bytes
|
||||
VBE.reload_20210723_091821_2056185.default.pipe_hdrbytes 0 0.00 Pipe request header bytes
|
||||
VBE.reload_20210723_091821_2056185.default.pipe_out 0 0.00 Piped bytes to backend
|
||||
VBE.reload_20210723_091821_2056185.default.pipe_in 0 0.00 Piped bytes from backend
|
||||
VBE.reload_20210723_091821_2056185.default.conn 0 . Concurrent connections to backend
|
||||
VBE.reload_20210723_091821_2056185.default.req 0 0.00 Backend requests sent
|
||||
VBE.reload_20210723_091821_2056185.default.unhealthy 0 0.00 Fetches not attempted due to backend being unhealthy
|
||||
VBE.reload_20210723_091821_2056185.default.busy 0 0.00 Fetches not attempted due to backend being busy
|
||||
VBE.reload_20210723_091821_2056185.default.fail 0 0.00 Connections failed
|
||||
VBE.reload_20210723_091821_2056185.default.fail_eacces 0 0.00 Connections failed with EACCES or EPERM
|
||||
VBE.reload_20210723_091821_2056185.default.fail_eaddrnotavail 0 0.00 Connections failed with EADDRNOTAVAIL
|
||||
VBE.reload_20210723_091821_2056185.default.fail_econnrefused 0 0.00 Connections failed with ECONNREFUSED
|
||||
VBE.reload_20210723_091821_2056185.default.fail_enetunreach 0 0.00 Connections failed with ENETUNREACH
|
||||
VBE.reload_20210723_091821_2056185.default.fail_etimedout 0 0.00 Connections failed ETIMEDOUT
|
||||
VBE.reload_20210723_091821_2056185.default.fail_other 0 0.00 Connections failed for other reason
|
||||
VBE.reload_20210723_091821_2056185.default.helddown 0 0.00 Connection opens not attempted
|
||||
VBE.reload_20210723_091821_2056185.server1.happy 48 . Happy health probes
|
||||
VBE.reload_20210723_091821_2056185.server1.bereq_hdrbytes 0 0.00 Request header bytes
|
||||
VBE.reload_20210723_091821_2056185.server1.bereq_bodybytes 0 0.00 Request body bytes
|
||||
VBE.reload_20210723_091821_2056185.server1.beresp_hdrbytes 0 0.00 Response header bytes
|
||||
VBE.reload_20210723_091821_2056185.server1.beresp_bodybytes 0 0.00 Response body bytes
|
||||
VBE.reload_20210723_091821_2056185.server1.pipe_hdrbytes 0 0.00 Pipe request header bytes
|
||||
VBE.reload_20210723_091821_2056185.server1.pipe_out 0 0.00 Piped bytes to backend
|
||||
VBE.reload_20210723_091821_2056185.server1.pipe_in 0 0.00 Piped bytes from backend
|
||||
VBE.reload_20210723_091821_2056185.server1.conn 0 . Concurrent connections to backend
|
||||
VBE.reload_20210723_091821_2056185.server1.req 0 0.00 Backend requests sent
|
||||
VBE.reload_20210723_091821_2056185.server1.unhealthy 0 0.00 Fetches not attempted due to backend being unhealthy
|
||||
VBE.reload_20210723_091821_2056185.server1.busy 0 0.00 Fetches not attempted due to backend being busy
|
||||
VBE.reload_20210723_091821_2056185.server1.fail 0 0.00 Connections failed
|
||||
VBE.reload_20210723_091821_2056185.server1.fail_eacces 0 0.00 Connections failed with EACCES or EPERM
|
||||
VBE.reload_20210723_091821_2056185.server1.fail_eaddrnotavail 0 0.00 Connections failed with EADDRNOTAVAIL
|
||||
VBE.reload_20210723_091821_2056185.server1.fail_econnrefused 4 0.00 Connections failed with ECONNREFUSED
|
||||
VBE.reload_20210723_091821_2056185.server1.fail_enetunreach 0 0.00 Connections failed with ENETUNREACH
|
||||
VBE.reload_20210723_091821_2056185.server1.fail_etimedout 0 0.00 Connections failed ETIMEDOUT
|
||||
VBE.reload_20210723_091821_2056185.server1.fail_other 0 0.00 Connections failed for other reason
|
||||
VBE.reload_20210723_091821_2056185.server1.helddown 0 0.00 Connection opens not attempted
|
||||
VBE.reload_20210723_091821_2056185.server2.happy 48 . Happy health probes
|
||||
VBE.reload_20210723_091821_2056185.server2.bereq_hdrbytes 0 0.00 Request header bytes
|
||||
VBE.reload_20210723_091821_2056185.server2.bereq_bodybytes 0 0.00 Request body bytes
|
||||
VBE.reload_20210723_091821_2056185.server2.beresp_hdrbytes 0 0.00 Response header bytes
|
||||
VBE.reload_20210723_091821_2056185.server2.beresp_bodybytes 0 0.00 Response body bytes
|
||||
VBE.reload_20210723_091821_2056185.server2.pipe_hdrbytes 0 0.00 Pipe request header bytes
|
||||
VBE.reload_20210723_091821_2056185.server2.pipe_out 0 0.00 Piped bytes to backend
|
||||
VBE.reload_20210723_091821_2056185.server2.pipe_in 0 0.00 Piped bytes from backend
|
||||
VBE.reload_20210723_091821_2056185.server2.conn 0 . Concurrent connections to backend
|
||||
VBE.reload_20210723_091821_2056185.server2.req 0 0.00 Backend requests sent
|
||||
VBE.reload_20210723_091821_2056185.server2.unhealthy 0 0.00 Fetches not attempted due to backend being unhealthy
|
||||
VBE.reload_20210723_091821_2056185.server2.busy 0 0.00 Fetches not attempted due to backend being busy
|
||||
VBE.reload_20210723_091821_2056185.server2.fail 0 0.00 Connections failed
|
||||
VBE.reload_20210723_091821_2056185.server2.fail_eacces 0 0.00 Connections failed with EACCES or EPERM
|
||||
VBE.reload_20210723_091821_2056185.server2.fail_eaddrnotavail 0 0.00 Connections failed with EADDRNOTAVAIL
|
||||
VBE.reload_20210723_091821_2056185.server2.fail_econnrefused 4 0.00 Connections failed with ECONNREFUSED
|
||||
VBE.reload_20210723_091821_2056185.server2.fail_enetunreach 0 0.00 Connections failed with ENETUNREACH
|
||||
VBE.reload_20210723_091821_2056185.server2.fail_etimedout 0 0.00 Connections failed ETIMEDOUT
|
||||
VBE.reload_20210723_091821_2056185.server2.fail_other 0 0.00 Connections failed for other reason
|
||||
VBE.reload_20210723_091821_2056185.server2.helddown 0 0.00 Connection opens not attempted
|
10
plugins/inputs/varnish/test_data/varnishadm-200.json
Normal file
10
plugins/inputs/varnish/test_data/varnishadm-200.json
Normal file
|
@ -0,0 +1,10 @@
|
|||
200
|
||||
[ 2, ["vcl.list", "-j"], 1631019726.316,
|
||||
{
|
||||
"status": "active",
|
||||
"state": "auto",
|
||||
"temperature": "warm",
|
||||
"busy": 0,
|
||||
"name": "boot-123"
|
||||
}
|
||||
]
|
51
plugins/inputs/varnish/test_data/varnishadm-reload.json
Normal file
51
plugins/inputs/varnish/test_data/varnishadm-reload.json
Normal file
|
@ -0,0 +1,51 @@
|
|||
[ 2, ["vcl.list", "-j"], 1631029773.809,
|
||||
{
|
||||
"status": "available",
|
||||
"state": "cold",
|
||||
"temperature": "cold",
|
||||
"busy": 0,
|
||||
"name": "boot"
|
||||
},
|
||||
{
|
||||
"status": "available",
|
||||
"state": "cold",
|
||||
"temperature": "cold",
|
||||
"busy": 0,
|
||||
"name": "reload_20210719_143559_60674"
|
||||
},
|
||||
{
|
||||
"status": "available",
|
||||
"state": "cold",
|
||||
"temperature": "cold",
|
||||
"busy": 0,
|
||||
"name": "test"
|
||||
},
|
||||
{
|
||||
"status": "available",
|
||||
"state": "cold",
|
||||
"temperature": "cold",
|
||||
"busy": 0,
|
||||
"name": "test2"
|
||||
},
|
||||
{
|
||||
"status": "available",
|
||||
"state": "cold",
|
||||
"temperature": "cold",
|
||||
"busy": 0,
|
||||
"name": "test3"
|
||||
},
|
||||
{
|
||||
"status": "available",
|
||||
"state": "cold",
|
||||
"temperature": "cold",
|
||||
"busy": 0,
|
||||
"name": "reload_20210722_162225_1979744"
|
||||
},
|
||||
{
|
||||
"status": "active",
|
||||
"state": "auto",
|
||||
"temperature": "warm",
|
||||
"busy": 0,
|
||||
"name": "reload_20210723_091821_2056185"
|
||||
}
|
||||
]
|
412
plugins/inputs/varnish/varnish.go
Normal file
412
plugins/inputs/varnish/varnish.go
Normal file
|
@ -0,0 +1,412 @@
|
|||
//go:generate ../../../tools/readme_config_includer/generator
|
||||
//go:build !windows
|
||||
|
||||
package varnish
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
_ "embed"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/config"
|
||||
"github.com/influxdata/telegraf/filter"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
//go:embed sample.conf
|
||||
var sampleConfig string
|
||||
|
||||
var (
	// measurementNamespace is the metric name used for all emitted metrics.
	measurementNamespace = "varnish"
	// defaultStats is the stat filter used when no Stats are configured.
	defaultStats      = []string{"MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"}
	defaultStatBinary = "/usr/bin/varnishstat"
	defaultAdmBinary  = "/usr/bin/varnishadm"
	defaultTimeout    = config.Duration(time.Second)

	// vcl name and backend restriction regexp [A-Za-z][A-Za-z0-9_-]*
	// Regexps are tried in order; named groups drive the conversion:
	// "_vcl" -> VCL name (active-VCL filtering), "_field" -> field name,
	// any other named group -> tag. User regexps are prepended in Init.
	defaultRegexps = []*regexp.Regexp{
		// dynamic backends
		//nolint:lll // conditionally long line allowed to have a better understanding of following regexp
		// VBE.VCL_xxxx_xxx_VOD_SHIELD_Vxxxxxxxxxxxxx_xxxxxxxxxxxxx.goto.000007c8.(xx.xx.xxx.xx).(http://xxxxxxx-xxxxx-xxxxx-xxxxxx-xx-xxxx-x-xxxx.xx-xx-xxxx-x.amazonaws.com:80).(ttl:5.000000).fail_eaddrnotavail
		regexp.MustCompile(
			`^VBE\.(?P<_vcl>[\w\-]*)\.goto\.[[:alnum:]]+\.\((?P<backend>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\)\.\((?P<server>.*)\)\.\(ttl:\d*\.\d*.*\)`,
		),

		// VBE.reload_20210622_153544_23757.default.unhealthy
		regexp.MustCompile(`^VBE\.(?P<_vcl>[\w\-]*)\.(?P<backend>[\w\-]*)\.([\w\-]*)`),

		// KVSTORE values
		regexp.MustCompile(`^KVSTORE\.(?P<id>[\w\-]*)\.(?P<_vcl>[\w\-]*)\.([\w\-]*)`),

		// XCNT.abc1234.XXX+_YYYY.cr.pass.val
		regexp.MustCompile(`^XCNT\.(?P<_vcl>[\w\-]*)(\.)*(?P<group>[\w\-.+]*)\.(?P<_field>[\w\-.+]*)\.val`),

		// generic metric like MSE_STORE.store-1-1.g_aio_running_bytes_write
		// (catch-all: must stay last)
		regexp.MustCompile(`([\w\-]*)\.(?P<_field>[\w\-.]*)`),
	}
)
|
||||
|
||||
// runner abstracts execution of an external varnish binary, allowing tests to
// substitute a fake implementation for varnishstat/varnishadm.
type runner func(cmdName string, useSudo bool, args []string, timeout config.Duration) (*bytes.Buffer, error)

// Varnish is the telegraf input plugin collecting stats via varnishstat and,
// for MetricVersion 2, the active VCL via varnishadm.
type Varnish struct {
	Stats         []string // glob patterns selecting which stats to collect
	Binary        string   // path to the varnishstat binary
	BinaryArgs    []string // if set, replaces the default varnishstat arguments
	AdmBinary     string   // path to the varnishadm binary
	AdmBinaryArgs []string // if set, replaces the default varnishadm arguments
	UseSudo       bool     // prefix commands with "sudo -n"
	InstanceName  string   // varnish instance name, passed as "-n" to both binaries
	Timeout       config.Duration
	Regexps       []string // extra conversion regexps, tried before the built-in ones
	MetricVersion int      // 1 = legacy text parsing, 2 = JSON parsing with VCL filtering

	filter          filter.Filter    // compiled from Stats on first Gather
	run             runner           // executes varnishstat
	admRun          runner           // executes varnishadm
	regexpsCompiled []*regexp.Regexp // custom regexps followed by defaults
}
|
||||
|
||||
// Shell out to varnish cli and return the output
|
||||
func varnishRunner(cmdName string, useSudo bool, cmdArgs []string, timeout config.Duration) (*bytes.Buffer, error) {
|
||||
cmd := exec.Command(cmdName, cmdArgs...)
|
||||
|
||||
if useSudo {
|
||||
cmdArgs = append([]string{cmdName}, cmdArgs...)
|
||||
cmdArgs = append([]string{"-n"}, cmdArgs...)
|
||||
cmd = exec.Command("sudo", cmdArgs...)
|
||||
}
|
||||
|
||||
var out bytes.Buffer
|
||||
cmd.Stdout = &out
|
||||
|
||||
err := internal.RunTimeout(cmd, time.Duration(timeout))
|
||||
if err != nil {
|
||||
return &out, fmt.Errorf("error running %q %q: %w", cmdName, cmdArgs, err)
|
||||
}
|
||||
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
// SampleConfig returns the embedded sample configuration for this plugin.
func (*Varnish) SampleConfig() string {
	return sampleConfig
}
|
||||
|
||||
func (s *Varnish) Init() error {
|
||||
customRegexps := make([]*regexp.Regexp, 0, len(s.Regexps))
|
||||
for _, re := range s.Regexps {
|
||||
compiled, err := regexp.Compile(re)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error parsing regexp: %w", err)
|
||||
}
|
||||
customRegexps = append(customRegexps, compiled)
|
||||
}
|
||||
s.regexpsCompiled = append(customRegexps, s.regexpsCompiled...)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Gather collects the configured stats from varnish_stat and adds them to the
// Accumulator
//
// The prefix of each stat (eg MAIN, MEMPOOL, LCK, etc) will be used as a
// 'section' tag and all stats that share that prefix will be reported as fields
// with that tag
func (s *Varnish) Gather(acc telegraf.Accumulator) error {
	// Lazily compile the stat-name filter on the first collection cycle.
	if s.filter == nil {
		var err error
		if len(s.Stats) == 0 {
			s.filter, err = filter.Compile(defaultStats)
		} else {
			// legacy support, change "all" -> "*":
			if s.Stats[0] == "all" {
				s.Stats[0] = "*"
			}
			s.filter, err = filter.Compile(s.Stats)
		}
		if err != nil {
			return err
		}
	}

	admArgs, statsArgs := s.prepareCmdArgs()

	// Always run varnishstat; its output feeds both metric versions.
	statOut, err := s.run(s.Binary, s.UseSudo, statsArgs, s.Timeout)
	if err != nil {
		return fmt.Errorf("error gathering metrics: %w", err)
	}

	if s.MetricVersion == 2 {
		// run varnishadm to get active vcl; "boot" is the fallback when no
		// admRun is wired up (e.g. in tests).
		var activeVcl = "boot"
		if s.admRun != nil {
			admOut, err := s.admRun(s.AdmBinary, s.UseSudo, admArgs, s.Timeout)
			if err != nil {
				return fmt.Errorf("error gathering metrics: %w", err)
			}
			activeVcl, err = getActiveVCLJson(admOut)
			if err != nil {
				return fmt.Errorf("error gathering metrics: %w", err)
			}
		}
		return s.processMetricsV2(activeVcl, acc, statOut)
	}
	return s.processMetricsV1(acc, statOut)
}
|
||||
|
||||
// Prepare varnish cli tools arguments
|
||||
func (s *Varnish) prepareCmdArgs() (adm, stats []string) {
|
||||
// default varnishadm arguments
|
||||
admArgs := []string{"vcl.list", "-j"}
|
||||
|
||||
// default varnish stats arguments
|
||||
statsArgs := []string{"-j"}
|
||||
if s.MetricVersion == 1 {
|
||||
statsArgs = []string{"-1"}
|
||||
}
|
||||
|
||||
// add optional instance name
|
||||
if s.InstanceName != "" {
|
||||
statsArgs = append(statsArgs, []string{"-n", s.InstanceName}...)
|
||||
admArgs = append([]string{"-n", s.InstanceName}, admArgs...)
|
||||
}
|
||||
|
||||
// override custom arguments
|
||||
if len(s.AdmBinaryArgs) > 0 {
|
||||
admArgs = s.AdmBinaryArgs
|
||||
}
|
||||
// override custom arguments
|
||||
if len(s.BinaryArgs) > 0 {
|
||||
statsArgs = s.BinaryArgs
|
||||
}
|
||||
return admArgs, statsArgs
|
||||
}
|
||||
|
||||
func (s *Varnish) processMetricsV1(acc telegraf.Accumulator, out *bytes.Buffer) error {
|
||||
sectionMap := make(map[string]map[string]interface{})
|
||||
scanner := bufio.NewScanner(out)
|
||||
for scanner.Scan() {
|
||||
cols := strings.Fields(scanner.Text())
|
||||
if len(cols) < 2 {
|
||||
continue
|
||||
}
|
||||
if !strings.Contains(cols[0], ".") {
|
||||
continue
|
||||
}
|
||||
|
||||
stat := cols[0]
|
||||
value := cols[1]
|
||||
|
||||
if s.filter != nil && !s.filter.Match(stat) {
|
||||
continue
|
||||
}
|
||||
|
||||
parts := strings.SplitN(stat, ".", 2)
|
||||
section := parts[0]
|
||||
field := parts[1]
|
||||
|
||||
// Init the section if necessary
|
||||
if _, ok := sectionMap[section]; !ok {
|
||||
sectionMap[section] = make(map[string]interface{})
|
||||
}
|
||||
|
||||
var err error
|
||||
sectionMap[section][field], err = strconv.ParseUint(value, 10, 64)
|
||||
if err != nil {
|
||||
acc.AddError(fmt.Errorf("expected a numeric value for %s = %v", stat, value))
|
||||
}
|
||||
}
|
||||
|
||||
for section, fields := range sectionMap {
|
||||
tags := map[string]string{
|
||||
"section": section,
|
||||
}
|
||||
if len(fields) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
acc.AddFields("varnish", fields, tags)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// metrics version 2 - parsing json
// processMetricsV2 decodes "varnishstat -j" output and emits one metric per
// counter, named and tagged according to the configured regexps. Counters
// belonging to a VCL other than activeVcl are skipped.
func (s *Varnish) processMetricsV2(activeVcl string, acc telegraf.Accumulator, out *bytes.Buffer) error {
	rootJSON := make(map[string]interface{})
	dec := json.NewDecoder(out)
	// keep values as json.Number so we can choose uint64/int64/float64 below
	dec.UseNumber()
	if err := dec.Decode(&rootJSON); err != nil {
		return err
	}
	countersJSON := getCountersJSON(rootJSON)
	// one shared timestamp so all counters of this run align
	timestamp := time.Now()
	for fieldName, raw := range countersJSON {
		// "timestamp" is metadata emitted by varnishstat, not a counter
		if fieldName == "timestamp" {
			continue
		}
		if s.filter != nil && !s.filter.Match(fieldName) {
			continue
		}
		data, ok := raw.(map[string]interface{})
		if !ok {
			acc.AddError(fmt.Errorf("unexpected data from json: %s: %#v", fieldName, raw))
			continue
		}

		var metricValue interface{}
		var parseError error
		// "flag" describes the counter semantics: c/a = counter, g = gauge,
		// b = bitmap (parsed as uint64)
		flag := data["flag"]

		if value, ok := data["value"]; ok {
			if number, ok := value.(json.Number); ok {
				// parse bitmap value
				if flag == "b" {
					if metricValue, parseError = strconv.ParseUint(number.String(), 10, 64); parseError != nil {
						parseError = fmt.Errorf("%q value uint64 error: %w", fieldName, parseError)
					}
				} else if metricValue, parseError = number.Int64(); parseError != nil {
					// try parse float
					if metricValue, parseError = number.Float64(); parseError != nil {
						parseError = fmt.Errorf("stat %q value %q is not valid number: %w", fieldName, value, parseError)
					}
				}
			} else {
				// non-numeric values (e.g. strings) are passed through as-is
				metricValue = value
			}
		}

		if parseError != nil {
			acc.AddError(parseError)
			continue
		}

		metric := s.parseMetricV2(fieldName)
		if metric.vclName != "" && activeVcl != "" && metric.vclName != activeVcl {
			// skip not active vcl
			continue
		}

		fields := make(map[string]interface{})
		fields[metric.fieldName] = metricValue
		switch flag {
		case "c", "a":
			acc.AddCounter(metric.measurement, fields, metric.tags, timestamp)
		case "g":
			acc.AddGauge(metric.measurement, fields, metric.tags, timestamp)
		default:
			// unknown flags are reported as gauges
			acc.AddGauge(metric.measurement, fields, metric.tags, timestamp)
		}
	}
	return nil
}
|
||||
|
||||
// Parse the output of "varnishadm vcl.list -j" and find active vcls
|
||||
func getActiveVCLJson(out io.Reader) (string, error) {
|
||||
var output = ""
|
||||
if b, err := io.ReadAll(out); err == nil {
|
||||
output = string(b)
|
||||
}
|
||||
// workaround for non valid json in varnish 6.6.1 https://github.com/varnishcache/varnish-cache/issues/3687
|
||||
output = strings.TrimPrefix(output, "200")
|
||||
|
||||
var jsonOut []interface{}
|
||||
err := json.Unmarshal([]byte(output), &jsonOut)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
for _, item := range jsonOut {
|
||||
switch s := item.(type) {
|
||||
case []interface{}:
|
||||
command := s[0]
|
||||
if command != "vcl.list" {
|
||||
return "", fmt.Errorf("unsupported varnishadm command %v", jsonOut[1])
|
||||
}
|
||||
case map[string]interface{}:
|
||||
if s["status"] == "active" {
|
||||
return s["name"].(string), nil
|
||||
}
|
||||
default:
|
||||
// ignore
|
||||
continue
|
||||
}
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// getCountersJSON returns the "counters" section from varnishstat JSON output.
// Varnish 6.5+ wraps the counters in a "counters" object; older versions place
// them at the top level. Unlike the previous version, a "counters" entry that
// is not an object no longer panics on the type assertion — the root map is
// returned instead.
func getCountersJSON(rootJSON map[string]interface{}) map[string]interface{} {
	if counters, exists := rootJSON["counters"]; exists {
		if m, ok := counters.(map[string]interface{}); ok {
			return m
		}
	}
	return rootJSON
}
|
||||
|
||||
// converts varnish metrics name into field and list of tags
// The first regexp in regexpsCompiled that matches the name wins; its named
// groups are interpreted as: "_vcl" -> VCL name (used for active-VCL
// filtering), "_field" -> overrides the field name, anything else -> a tag.
func (s *Varnish) parseMetricV2(name string) (metric varnishMetric) {
	metric.measurement = measurementNamespace
	// names without a dot have no section/field structure; return as-is
	if strings.Count(name, ".") == 0 {
		return metric
	}
	// default field name: everything after the last dot
	metric.fieldName = name[strings.LastIndex(name, ".")+1:]
	// default "section" tag: everything before the first dot (MAIN, VBE, ...)
	var section = strings.Split(name, ".")[0]
	metric.tags = map[string]string{
		"section": section,
	}

	// parse name using regexpsCompiled
	for _, re := range s.regexpsCompiled {
		submatch := re.FindStringSubmatch(name)
		if len(submatch) < 1 {
			continue
		}
		for _, sub := range re.SubexpNames() {
			// skip unnamed capture groups
			if sub == "" {
				continue
			}
			val := submatch[re.SubexpIndex(sub)]
			if sub == "_vcl" {
				metric.vclName = val
			} else if sub == "_field" {
				metric.fieldName = val
			} else if val != "" {
				// every other non-empty named group becomes a tag
				metric.tags[sub] = val
			}
		}
		// first match wins
		break
	}
	return metric
}
|
||||
|
||||
// varnishMetric is the parsed form of a single varnish stat name: the telegraf
// measurement, the field name, the tags extracted by the conversion regexps,
// and the owning VCL name (if any) used to filter out non-active VCLs.
type varnishMetric struct {
	measurement string
	fieldName   string
	tags        map[string]string
	vclName     string // empty when the stat is not tied to a specific VCL
}
|
||||
|
||||
// init registers the plugin with telegraf, wired to the real varnish binaries,
// the built-in conversion regexps, and the v1 metric format by default.
func init() {
	inputs.Add("varnish", func() telegraf.Input {
		return &Varnish{
			run:             varnishRunner,
			admRun:          varnishRunner,
			regexpsCompiled: defaultRegexps,
			Stats:           defaultStats,
			Binary:          defaultStatBinary,
			AdmBinary:       defaultAdmBinary,
			MetricVersion:   1,
			UseSudo:         false,
			InstanceName:    "",
			Timeout:         defaultTimeout,
		}
	})
}
|
634
plugins/inputs/varnish/varnish_test.go
Normal file
634
plugins/inputs/varnish/varnish_test.go
Normal file
|
@ -0,0 +1,634 @@
|
|||
//go:build !windows
|
||||
|
||||
package varnish
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/influxdata/telegraf/config"
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
)
|
||||
|
||||
func fakeVarnishRunner(output string) func(string, bool, []string, config.Duration) (*bytes.Buffer, error) {
|
||||
return func(string, bool, []string, config.Duration) (*bytes.Buffer, error) {
|
||||
return bytes.NewBufferString(output), nil
|
||||
}
|
||||
}
|
||||
|
||||
// TestGather verifies that a small varnishstat fixture is parsed into one
// metric per section carrying the expected fields.
func TestGather(t *testing.T) {
	acc := &testutil.Accumulator{}
	v := &Varnish{
		run:   fakeVarnishRunner(smOutput),
		Stats: []string{"*"},
	}
	require.NoError(t, v.Gather(acc))

	acc.HasMeasurement("varnish")
	// each section of the fixture must appear as its own tagged field set
	for tag, fields := range parsedSmOutput {
		acc.AssertContainsTaggedFields(t, "varnish", fields, map[string]string{
			"section": tag,
		})
	}
}
|
||||
|
||||
// TestParseFullOutput gathers the full varnishstat fixture and checks the
// number of emitted metrics (one per section) and the flattened field count.
func TestParseFullOutput(t *testing.T) {
	acc := &testutil.Accumulator{}
	v := &Varnish{
		run:   fakeVarnishRunner(fullOutput),
		Stats: []string{"*"},
	}
	require.NoError(t, v.Gather(acc))

	acc.HasMeasurement("varnish")
	flat := flatten(acc.Metrics)
	require.Len(t, acc.Metrics, 6)
	require.Len(t, flat, 293)
}
|
||||
|
||||
// TestFilterSomeStats checks that glob filters restrict collection to the
// matching sections only (MGT.* and VBE.* here).
func TestFilterSomeStats(t *testing.T) {
	acc := &testutil.Accumulator{}
	v := &Varnish{
		run:   fakeVarnishRunner(fullOutput),
		Stats: []string{"MGT.*", "VBE.*"},
	}
	require.NoError(t, v.Gather(acc))

	acc.HasMeasurement("varnish")
	flat := flatten(acc.Metrics)
	require.Len(t, acc.Metrics, 2)
	require.Len(t, flat, 16)
}
|
||||
|
||||
// TestFieldConfig exercises several Stats configurations (wildcard, empty,
// single stat, comma-separated list) and checks the collected field counts.
func TestFieldConfig(t *testing.T) {
	// maps a comma-separated Stats config to the expected number of fields
	expect := map[string]int{
		"*":           293,
		"":            0, // default
		"MAIN.uptime": 1,
		"MEMPOOL.req0.sz_needed,MAIN.fetch_bad": 2,
	}

	for fieldCfg, expected := range expect {
		acc := &testutil.Accumulator{}
		v := &Varnish{
			run:   fakeVarnishRunner(fullOutput),
			Stats: strings.Split(fieldCfg, ","),
		}
		require.NoError(t, v.Gather(acc))

		acc.HasMeasurement("varnish")
		flat := flatten(acc.Metrics)
		require.Len(t, flat, expected)
	}
}
|
||||
|
||||
func flatten(metrics []*testutil.Metric) map[string]interface{} {
|
||||
flat := map[string]interface{}{}
|
||||
for _, m := range metrics {
|
||||
buf := &bytes.Buffer{}
|
||||
for k, v := range m.Tags {
|
||||
fmt.Fprintf(buf, "%s=%s", k, v)
|
||||
}
|
||||
for k, v := range m.Fields {
|
||||
flat[fmt.Sprintf("%s %s", buf.String(), k)] = v
|
||||
}
|
||||
}
|
||||
return flat
|
||||
}
|
||||
|
||||
var smOutput = `
|
||||
MAIN.uptime 895 1.00 Child process uptime
|
||||
MAIN.cache_hit 95 0.00 Cache hits
|
||||
MAIN.cache_miss 5 0.00 Cache misses
|
||||
MGT.uptime 896 1.00 Management process uptime
|
||||
MGT.child_start 1 0.00 Child process started
|
||||
MEMPOOL.vbc.live 0 . In use
|
||||
MEMPOOL.vbc.pool 10 . In Pool
|
||||
MEMPOOL.vbc.sz_wanted 88 . Size requested
|
||||
`
|
||||
|
||||
var parsedSmOutput = map[string]map[string]interface{}{
|
||||
"MAIN": {
|
||||
"uptime": uint64(895),
|
||||
"cache_hit": uint64(95),
|
||||
"cache_miss": uint64(5),
|
||||
},
|
||||
"MGT": {
|
||||
"uptime": uint64(896),
|
||||
"child_start": uint64(1),
|
||||
},
|
||||
"MEMPOOL": {
|
||||
"vbc.live": uint64(0),
|
||||
"vbc.pool": uint64(10),
|
||||
"vbc.sz_wanted": uint64(88),
|
||||
},
|
||||
}
|
||||
|
||||
var fullOutput = `
|
||||
MAIN.uptime 2872 1.00 Child process uptime
|
||||
MAIN.sess_conn 0 0.00 Sessions accepted
|
||||
MAIN.sess_drop 0 0.00 Sessions dropped
|
||||
MAIN.sess_fail 0 0.00 Session accept failures
|
||||
MAIN.sess_pipe_overflow 0 0.00 Session pipe overflow
|
||||
MAIN.client_req_400 0 0.00 Client requests received, subject to 400 errors
|
||||
MAIN.client_req_411 0 0.00 Client requests received, subject to 411 errors
|
||||
MAIN.client_req_413 0 0.00 Client requests received, subject to 413 errors
|
||||
MAIN.client_req_417 0 0.00 Client requests received, subject to 417 errors
|
||||
MAIN.client_req 0 0.00 Good client requests received
|
||||
MAIN.cache_hit 0 0.00 Cache hits
|
||||
MAIN.cache_hitpass 0 0.00 Cache hits for pass
|
||||
MAIN.cache_miss 0 0.00 Cache misses
|
||||
MAIN.backend_conn 0 0.00 Backend conn. success
|
||||
MAIN.backend_unhealthy 0 0.00 Backend conn. not attempted
|
||||
MAIN.backend_busy 0 0.00 Backend conn. too many
|
||||
MAIN.backend_fail 0 0.00 Backend conn. failures
|
||||
MAIN.backend_reuse 0 0.00 Backend conn. reuses
|
||||
MAIN.backend_toolate 0 0.00 Backend conn. was closed
|
||||
MAIN.backend_recycle 0 0.00 Backend conn. recycles
|
||||
MAIN.backend_retry 0 0.00 Backend conn. retry
|
||||
MAIN.fetch_head 0 0.00 Fetch no body (HEAD)
|
||||
MAIN.fetch_length 0 0.00 Fetch with Length
|
||||
MAIN.fetch_chunked 0 0.00 Fetch chunked
|
||||
MAIN.fetch_eof 0 0.00 Fetch EOF
|
||||
MAIN.fetch_bad 0 0.00 Fetch bad T-E
|
||||
MAIN.fetch_close 0 0.00 Fetch wanted close
|
||||
MAIN.fetch_oldhttp 0 0.00 Fetch pre HTTP/1.1 closed
|
||||
MAIN.fetch_zero 0 0.00 Fetch zero len body
|
||||
MAIN.fetch_1xx 0 0.00 Fetch no body (1xx)
|
||||
MAIN.fetch_204 0 0.00 Fetch no body (204)
|
||||
MAIN.fetch_304 0 0.00 Fetch no body (304)
|
||||
MAIN.fetch_failed 0 0.00 Fetch failed (all causes)
|
||||
MAIN.fetch_no_thread 0 0.00 Fetch failed (no thread)
|
||||
MAIN.pools 2 . Number of thread pools
|
||||
MAIN.threads 200 . Total number of threads
|
||||
MAIN.threads_limited 0 0.00 Threads hit max
|
||||
MAIN.threads_created 200 0.07 Threads created
|
||||
MAIN.threads_destroyed 0 0.00 Threads destroyed
|
||||
MAIN.threads_failed 0 0.00 Thread creation failed
|
||||
MAIN.thread_queue_len 0 . Length of session queue
|
||||
MAIN.busy_sleep 0 0.00 Number of requests sent to sleep on busy objhdr
|
||||
MAIN.busy_wakeup 0 0.00 Number of requests woken after sleep on busy objhdr
|
||||
MAIN.sess_queued 0 0.00 Sessions queued for thread
|
||||
MAIN.sess_dropped 0 0.00 Sessions dropped for thread
|
||||
MAIN.n_object 0 . object structs made
|
||||
MAIN.n_vampireobject 0 . unresurrected objects
|
||||
MAIN.n_objectcore 0 . objectcore structs made
|
||||
MAIN.n_objecthead 0 . objecthead structs made
|
||||
MAIN.n_waitinglist 0 . waitinglist structs made
|
||||
MAIN.n_backend 1 . Number of backends
|
||||
MAIN.n_expired 0 . Number of expired objects
|
||||
MAIN.n_lru_nuked 0 . Number of LRU nuked objects
|
||||
MAIN.n_lru_moved 0 . Number of LRU moved objects
|
||||
MAIN.losthdr 0 0.00 HTTP header overflows
|
||||
MAIN.s_sess 0 0.00 Total sessions seen
|
||||
MAIN.s_req 0 0.00 Total requests seen
|
||||
MAIN.s_pipe 0 0.00 Total pipe sessions seen
|
||||
MAIN.s_pass 0 0.00 Total pass-ed requests seen
|
||||
MAIN.s_fetch 0 0.00 Total backend fetches initiated
|
||||
MAIN.s_synth 0 0.00 Total synthetic responses made
|
||||
MAIN.s_req_hdrbytes 0 0.00 Request header bytes
|
||||
MAIN.s_req_bodybytes 0 0.00 Request body bytes
|
||||
MAIN.s_resp_hdrbytes 0 0.00 Response header bytes
|
||||
MAIN.s_resp_bodybytes 0 0.00 Response body bytes
|
||||
MAIN.s_pipe_hdrbytes 0 0.00 Pipe request header bytes
|
||||
MAIN.s_pipe_in 0 0.00 Piped bytes from client
|
||||
MAIN.s_pipe_out 0 0.00 Piped bytes to client
|
||||
MAIN.sess_closed 0 0.00 Session Closed
|
||||
MAIN.sess_pipeline 0 0.00 Session Pipeline
|
||||
MAIN.sess_readahead 0 0.00 Session Read Ahead
|
||||
MAIN.sess_herd 0 0.00 Session herd
|
||||
MAIN.shm_records 1918 0.67 SHM records
|
||||
MAIN.shm_writes 1918 0.67 SHM writes
|
||||
MAIN.shm_flushes 0 0.00 SHM flushes due to overflow
|
||||
MAIN.shm_cont 0 0.00 SHM MTX contention
|
||||
MAIN.shm_cycles 0 0.00 SHM cycles through buffer
|
||||
MAIN.sms_nreq 0 0.00 SMS allocator requests
|
||||
MAIN.sms_nobj 0 . SMS outstanding allocations
|
||||
MAIN.sms_nbytes 0 . SMS outstanding bytes
|
||||
MAIN.sms_balloc 0 . SMS bytes allocated
|
||||
MAIN.sms_bfree 0 . SMS bytes freed
|
||||
MAIN.backend_req 0 0.00 Backend requests made
|
||||
MAIN.n_vcl 1 0.00 Number of loaded VCLs in total
|
||||
MAIN.n_vcl_avail 1 0.00 Number of VCLs available
|
||||
MAIN.n_vcl_discard 0 0.00 Number of discarded VCLs
|
||||
MAIN.bans 1 . Count of bans
|
||||
MAIN.bans_completed 1 . Number of bans marked 'completed'
|
||||
MAIN.bans_obj 0 . Number of bans using obj.*
|
||||
MAIN.bans_req 0 . Number of bans using req.*
|
||||
MAIN.bans_added 1 0.00 Bans added
|
||||
MAIN.bans_deleted 0 0.00 Bans deleted
|
||||
MAIN.bans_tested 0 0.00 Bans tested against objects (lookup)
|
||||
MAIN.bans_obj_killed 0 0.00 Objects killed by bans (lookup)
|
||||
MAIN.bans_lurker_tested 0 0.00 Bans tested against objects (lurker)
|
||||
MAIN.bans_tests_tested 0 0.00 Ban tests tested against objects (lookup)
|
||||
MAIN.bans_lurker_tests_tested 0 0.00 Ban tests tested against objects (lurker)
|
||||
MAIN.bans_lurker_obj_killed 0 0.00 Objects killed by bans (lurker)
|
||||
MAIN.bans_dups 0 0.00 Bans superseded by other bans
|
||||
MAIN.bans_lurker_contention 0 0.00 Lurker gave way for lookup
|
||||
MAIN.bans_persisted_bytes 13 . Bytes used by the persisted ban lists
|
||||
MAIN.bans_persisted_fragmentation 0 . Extra bytes in persisted ban lists due to fragmentation
|
||||
MAIN.n_purges 0 . Number of purge operations executed
|
||||
MAIN.n_obj_purged 0 . Number of purged objects
|
||||
MAIN.exp_mailed 0 0.00 Number of objects mailed to expiry thread
|
||||
MAIN.exp_received 0 0.00 Number of objects received by expiry thread
|
||||
MAIN.hcb_nolock 0 0.00 HCB Lookups without lock
|
||||
MAIN.hcb_lock 0 0.00 HCB Lookups with lock
|
||||
MAIN.hcb_insert 0 0.00 HCB Inserts
|
||||
MAIN.esi_errors 0 0.00 ESI parse errors (unlock)
|
||||
MAIN.esi_warnings 0 0.00 ESI parse warnings (unlock)
|
||||
MAIN.vmods 0 . Loaded VMODs
|
||||
MAIN.n_gzip 0 0.00 Gzip operations
|
||||
MAIN.n_gunzip 0 0.00 Gunzip operations
|
||||
MAIN.vsm_free 972528 . Free VSM space
|
||||
MAIN.vsm_used 83962080 . Used VSM space
|
||||
MAIN.vsm_cooling 0 . Cooling VSM space
|
||||
MAIN.vsm_overflow 0 . Overflow VSM space
|
||||
MAIN.vsm_overflowed 0 0.00 Overflowed VSM space
|
||||
MGT.uptime 2871 1.00 Management process uptime
|
||||
MGT.child_start 1 0.00 Child process started
|
||||
MGT.child_exit 0 0.00 Child process normal exit
|
||||
MGT.child_stop 0 0.00 Child process unexpected exit
|
||||
MGT.child_died 0 0.00 Child process died (signal)
|
||||
MGT.child_dump 0 0.00 Child process core dumped
|
||||
MGT.child_panic 0 0.00 Child process panic
|
||||
MEMPOOL.vbc.live 0 . In use
|
||||
MEMPOOL.vbc.pool 10 . In Pool
|
||||
MEMPOOL.vbc.sz_wanted 88 . Size requested
|
||||
MEMPOOL.vbc.sz_needed 120 . Size allocated
|
||||
MEMPOOL.vbc.allocs 0 0.00 Allocations
|
||||
MEMPOOL.vbc.frees 0 0.00 Frees
|
||||
MEMPOOL.vbc.recycle 0 0.00 Recycled from pool
|
||||
MEMPOOL.vbc.timeout 0 0.00 Timed out from pool
|
||||
MEMPOOL.vbc.toosmall 0 0.00 Too small to recycle
|
||||
MEMPOOL.vbc.surplus 0 0.00 Too many for pool
|
||||
MEMPOOL.vbc.randry 0 0.00 Pool ran dry
|
||||
MEMPOOL.busyobj.live 0 . In use
|
||||
MEMPOOL.busyobj.pool 10 . In Pool
|
||||
MEMPOOL.busyobj.sz_wanted 65536 . Size requested
|
||||
MEMPOOL.busyobj.sz_needed 65568 . Size allocated
|
||||
MEMPOOL.busyobj.allocs 0 0.00 Allocations
|
||||
MEMPOOL.busyobj.frees 0 0.00 Frees
|
||||
MEMPOOL.busyobj.recycle 0 0.00 Recycled from pool
|
||||
MEMPOOL.busyobj.timeout 0 0.00 Timed out from pool
|
||||
MEMPOOL.busyobj.toosmall 0 0.00 Too small to recycle
|
||||
MEMPOOL.busyobj.surplus 0 0.00 Too many for pool
|
||||
MEMPOOL.busyobj.randry 0 0.00 Pool ran dry
|
||||
MEMPOOL.req0.live 0 . In use
|
||||
MEMPOOL.req0.pool 10 . In Pool
|
||||
MEMPOOL.req0.sz_wanted 65536 . Size requested
|
||||
MEMPOOL.req0.sz_needed 65568 . Size allocated
|
||||
MEMPOOL.req0.allocs 0 0.00 Allocations
|
||||
MEMPOOL.req0.frees 0 0.00 Frees
|
||||
MEMPOOL.req0.recycle 0 0.00 Recycled from pool
|
||||
MEMPOOL.req0.timeout 0 0.00 Timed out from pool
|
||||
MEMPOOL.req0.toosmall 0 0.00 Too small to recycle
|
||||
MEMPOOL.req0.surplus 0 0.00 Too many for pool
|
||||
MEMPOOL.req0.randry 0 0.00 Pool ran dry
|
||||
MEMPOOL.sess0.live 0 . In use
|
||||
MEMPOOL.sess0.pool 10 . In Pool
|
||||
MEMPOOL.sess0.sz_wanted 384 . Size requested
|
||||
MEMPOOL.sess0.sz_needed 416 . Size allocated
|
||||
MEMPOOL.sess0.allocs 0 0.00 Allocations
|
||||
MEMPOOL.sess0.frees 0 0.00 Frees
|
||||
MEMPOOL.sess0.recycle 0 0.00 Recycled from pool
|
||||
MEMPOOL.sess0.timeout 0 0.00 Timed out from pool
|
||||
MEMPOOL.sess0.toosmall 0 0.00 Too small to recycle
|
||||
MEMPOOL.sess0.surplus 0 0.00 Too many for pool
|
||||
MEMPOOL.sess0.randry 0 0.00 Pool ran dry
|
||||
MEMPOOL.req1.live 0 . In use
|
||||
MEMPOOL.req1.pool 10 . In Pool
|
||||
MEMPOOL.req1.sz_wanted 65536 . Size requested
|
||||
MEMPOOL.req1.sz_needed 65568 . Size allocated
|
||||
MEMPOOL.req1.allocs 0 0.00 Allocations
|
||||
MEMPOOL.req1.frees 0 0.00 Frees
|
||||
MEMPOOL.req1.recycle 0 0.00 Recycled from pool
|
||||
MEMPOOL.req1.timeout 0 0.00 Timed out from pool
|
||||
MEMPOOL.req1.toosmall 0 0.00 Too small to recycle
|
||||
MEMPOOL.req1.surplus 0 0.00 Too many for pool
|
||||
MEMPOOL.req1.randry 0 0.00 Pool ran dry
|
||||
MEMPOOL.sess1.live 0 . In use
|
||||
MEMPOOL.sess1.pool 10 . In Pool
|
||||
MEMPOOL.sess1.sz_wanted 384 . Size requested
|
||||
MEMPOOL.sess1.sz_needed 416 . Size allocated
|
||||
MEMPOOL.sess1.allocs 0 0.00 Allocations
|
||||
MEMPOOL.sess1.frees 0 0.00 Frees
|
||||
MEMPOOL.sess1.recycle 0 0.00 Recycled from pool
|
||||
MEMPOOL.sess1.timeout 0 0.00 Timed out from pool
|
||||
MEMPOOL.sess1.toosmall 0 0.00 Too small to recycle
|
||||
MEMPOOL.sess1.surplus 0 0.00 Too many for pool
|
||||
MEMPOOL.sess1.randry 0 0.00 Pool ran dry
|
||||
SMA.s0.c_req 0 0.00 Allocator requests
|
||||
SMA.s0.c_fail 0 0.00 Allocator failures
|
||||
SMA.s0.c_bytes 0 0.00 Bytes allocated
|
||||
SMA.s0.c_freed 0 0.00 Bytes freed
|
||||
SMA.s0.g_alloc 0 . Allocations outstanding
|
||||
SMA.s0.g_bytes 0 . Bytes outstanding
|
||||
SMA.s0.g_space 268435456 . Bytes available
|
||||
SMA.Transient.c_req 0 0.00 Allocator requests
|
||||
SMA.Transient.c_fail 0 0.00 Allocator failures
|
||||
SMA.Transient.c_bytes 0 0.00 Bytes allocated
|
||||
SMA.Transient.c_freed 0 0.00 Bytes freed
|
||||
SMA.Transient.g_alloc 0 . Allocations outstanding
|
||||
SMA.Transient.g_bytes 0 . Bytes outstanding
|
||||
SMA.Transient.g_space 0 . Bytes available
|
||||
VBE.default(127.0.0.1,,8080).vcls 1 . VCL references
|
||||
VBE.default(127.0.0.1,,8080).happy 0 . Happy health probes
|
||||
VBE.default(127.0.0.1,,8080).bereq_hdrbytes 0 0.00 Request header bytes
|
||||
VBE.default(127.0.0.1,,8080).bereq_bodybytes 0 0.00 Request body bytes
|
||||
VBE.default(127.0.0.1,,8080).beresp_hdrbytes 0 0.00 Response header bytes
|
||||
VBE.default(127.0.0.1,,8080).beresp_bodybytes 0 0.00 Response body bytes
|
||||
VBE.default(127.0.0.1,,8080).pipe_hdrbytes 0 0.00 Pipe request header bytes
|
||||
VBE.default(127.0.0.1,,8080).pipe_out 0 0.00 Piped bytes to backend
|
||||
VBE.default(127.0.0.1,,8080).pipe_in 0 0.00 Piped bytes from backend
|
||||
LCK.sms.creat 0 0.00 Created locks
|
||||
LCK.sms.destroy 0 0.00 Destroyed locks
|
||||
LCK.sms.locks 0 0.00 Lock Operations
|
||||
LCK.smp.creat 0 0.00 Created locks
|
||||
LCK.smp.destroy 0 0.00 Destroyed locks
|
||||
LCK.smp.locks 0 0.00 Lock Operations
|
||||
LCK.sma.creat 2 0.00 Created locks
|
||||
LCK.sma.destroy 0 0.00 Destroyed locks
|
||||
LCK.sma.locks 0 0.00 Lock Operations
|
||||
LCK.smf.creat 0 0.00 Created locks
|
||||
LCK.smf.destroy 0 0.00 Destroyed locks
|
||||
LCK.smf.locks 0 0.00 Lock Operations
|
||||
LCK.hsl.creat 0 0.00 Created locks
|
||||
LCK.hsl.destroy 0 0.00 Destroyed locks
|
||||
LCK.hsl.locks 0 0.00 Lock Operations
|
||||
LCK.hcb.creat 1 0.00 Created locks
|
||||
LCK.hcb.destroy 0 0.00 Destroyed locks
|
||||
LCK.hcb.locks 16 0.01 Lock Operations
|
||||
LCK.hcl.creat 0 0.00 Created locks
|
||||
LCK.hcl.destroy 0 0.00 Destroyed locks
|
||||
LCK.hcl.locks 0 0.00 Lock Operations
|
||||
LCK.vcl.creat 1 0.00 Created locks
|
||||
LCK.vcl.destroy 0 0.00 Destroyed locks
|
||||
LCK.vcl.locks 2 0.00 Lock Operations
|
||||
LCK.sessmem.creat 0 0.00 Created locks
|
||||
LCK.sessmem.destroy 0 0.00 Destroyed locks
|
||||
LCK.sessmem.locks 0 0.00 Lock Operations
|
||||
LCK.sess.creat 0 0.00 Created locks
|
||||
LCK.sess.destroy 0 0.00 Destroyed locks
|
||||
LCK.sess.locks 0 0.00 Lock Operations
|
||||
LCK.wstat.creat 1 0.00 Created locks
|
||||
LCK.wstat.destroy 0 0.00 Destroyed locks
|
||||
LCK.wstat.locks 930 0.32 Lock Operations
|
||||
LCK.herder.creat 0 0.00 Created locks
|
||||
LCK.herder.destroy 0 0.00 Destroyed locks
|
||||
LCK.herder.locks 0 0.00 Lock Operations
|
||||
LCK.wq.creat 3 0.00 Created locks
|
||||
LCK.wq.destroy 0 0.00 Destroyed locks
|
||||
LCK.wq.locks 1554 0.54 Lock Operations
|
||||
LCK.objhdr.creat 1 0.00 Created locks
|
||||
LCK.objhdr.destroy 0 0.00 Destroyed locks
|
||||
LCK.objhdr.locks 0 0.00 Lock Operations
|
||||
LCK.exp.creat 1 0.00 Created locks
|
||||
LCK.exp.destroy 0 0.00 Destroyed locks
|
||||
LCK.exp.locks 915 0.32 Lock Operations
|
||||
LCK.lru.creat 2 0.00 Created locks
|
||||
LCK.lru.destroy 0 0.00 Destroyed locks
|
||||
LCK.lru.locks 0 0.00 Lock Operations
|
||||
LCK.cli.creat 1 0.00 Created locks
|
||||
LCK.cli.destroy 0 0.00 Destroyed locks
|
||||
LCK.cli.locks 970 0.34 Lock Operations
|
||||
LCK.ban.creat 1 0.00 Created locks
|
||||
LCK.ban.destroy 0 0.00 Destroyed locks
|
||||
LCK.ban.locks 9413 3.28 Lock Operations
|
||||
LCK.vbp.creat 1 0.00 Created locks
|
||||
LCK.vbp.destroy 0 0.00 Destroyed locks
|
||||
LCK.vbp.locks 0 0.00 Lock Operations
|
||||
LCK.backend.creat 1 0.00 Created locks
|
||||
LCK.backend.destroy 0 0.00 Destroyed locks
|
||||
LCK.backend.locks 0 0.00 Lock Operations
|
||||
LCK.vcapace.creat 1 0.00 Created locks
|
||||
LCK.vcapace.destroy 0 0.00 Destroyed locks
|
||||
LCK.vcapace.locks 0 0.00 Lock Operations
|
||||
LCK.nbusyobj.creat 0 0.00 Created locks
|
||||
LCK.nbusyobj.destroy 0 0.00 Destroyed locks
|
||||
LCK.nbusyobj.locks 0 0.00 Lock Operations
|
||||
LCK.busyobj.creat 0 0.00 Created locks
|
||||
LCK.busyobj.destroy 0 0.00 Destroyed locks
|
||||
LCK.busyobj.locks 0 0.00 Lock Operations
|
||||
LCK.mempool.creat 6 0.00 Created locks
|
||||
LCK.mempool.destroy 0 0.00 Destroyed locks
|
||||
LCK.mempool.locks 15306 5.33 Lock Operations
|
||||
LCK.vxid.creat 1 0.00 Created locks
|
||||
LCK.vxid.destroy 0 0.00 Destroyed locks
|
||||
LCK.vxid.locks 0 0.00 Lock Operations
|
||||
LCK.pipestat.creat 1 0.00 Created locks
|
||||
LCK.pipestat.destroy 0 0.00 Destroyed locks
|
||||
LCK.pipestat.locks 0 0.00 Lock Operations
|
||||
`
|
||||
|
||||
// testConfig describes one parseMetricV2 test case: a raw varnishstat
// counter name and the pieces it is expected to decompose into.
type testConfig struct {
	vName         string            // raw varnishstat counter name, e.g. "VBE.boot.default.fail"
	tags          map[string]string // expected telegraf tags extracted from the name
	field         string            // expected telegraf field name
	activeVcl     string            // expected VCL name parsed out of the counter name ("" if none)
	customRegexps []string          // extra user-supplied regexps installed before parsing
}
|
||||
|
||||
func TestV2ParseVarnishNames(t *testing.T) {
|
||||
for _, c := range []testConfig{
|
||||
{
|
||||
vName: "MGT.uptime",
|
||||
tags: map[string]string{"section": "MGT"},
|
||||
field: "uptime",
|
||||
},
|
||||
{
|
||||
vName: "VBE.boot.default.fail",
|
||||
tags: map[string]string{"backend": "default", "section": "VBE"},
|
||||
field: "fail",
|
||||
activeVcl: "boot",
|
||||
},
|
||||
{
|
||||
vName: "MEMPOOL.req1.allocs",
|
||||
tags: map[string]string{"section": "MEMPOOL"},
|
||||
field: "req1.allocs",
|
||||
},
|
||||
{
|
||||
vName: "SMF.s0.c_bytes",
|
||||
tags: map[string]string{"section": "SMF"},
|
||||
field: "s0.c_bytes",
|
||||
},
|
||||
{
|
||||
vName: "VBE.reload_20210622_153544_23757.server1.happy",
|
||||
tags: map[string]string{"backend": "server1", "section": "VBE"},
|
||||
field: "happy",
|
||||
activeVcl: "reload_20210622_153544_23757",
|
||||
},
|
||||
{
|
||||
vName: "XXX.YYY.AAA",
|
||||
tags: map[string]string{"section": "XXX"},
|
||||
field: "YYY.AAA",
|
||||
},
|
||||
{
|
||||
vName: "VBE.vcl_20211502_214503.goto.000007d4.(10.100.0.1).(https://example.com:443).(ttl:10.000000).beresp_bodybytes",
|
||||
tags: map[string]string{"backend": "10.100.0.1", "server": "https://example.com:443", "section": "VBE"},
|
||||
activeVcl: "vcl_20211502_214503",
|
||||
field: "beresp_bodybytes",
|
||||
},
|
||||
{
|
||||
vName: "VBE.VCL_xxxx_xxx_VOD_SHIELD_Vxxxxxxxxxxxxx_xxxxxxxxxxxxx.default.bereq_hdrbytes",
|
||||
tags: map[string]string{"backend": "default", "section": "VBE"},
|
||||
activeVcl: "VCL_xxxx_xxx_VOD_SHIELD_Vxxxxxxxxxxxxx_xxxxxxxxxxxxx",
|
||||
field: "bereq_hdrbytes",
|
||||
},
|
||||
{
|
||||
vName: "VBE.VCL_ROUTER_V123_123.default.happy",
|
||||
tags: map[string]string{"backend": "default", "section": "VBE"},
|
||||
field: "happy",
|
||||
activeVcl: "VCL_ROUTER_V123_123",
|
||||
},
|
||||
{
|
||||
vName: "KVSTORE.ds_stats.VCL_xxxx_xxx_A_B_C.shield",
|
||||
tags: map[string]string{"id": "ds_stats", "section": "KVSTORE"},
|
||||
field: "shield",
|
||||
activeVcl: "VCL_xxxx_xxx_A_B_C",
|
||||
},
|
||||
{
|
||||
vName: "LCK.goto.director.destroy",
|
||||
tags: map[string]string{"section": "LCK"},
|
||||
field: "goto.director.destroy",
|
||||
activeVcl: "",
|
||||
},
|
||||
{
|
||||
vName: "XCNT.1111.XXX+_LINE.cr.deliver_stub_restart.val",
|
||||
tags: map[string]string{"group": "XXX+_LINE.cr", "section": "XCNT"},
|
||||
field: "deliver_stub_restart",
|
||||
activeVcl: "1111",
|
||||
},
|
||||
{
|
||||
vName: "VBE.VCL_1023_DIS_VOD_SHIELD_V1629295401194_1629295437531.goto.00000000.(111.112.113.114)." +
|
||||
"(http://abc-ede.xyz.yyy.com:80).(ttl:3600.000000).is_healthy",
|
||||
tags: map[string]string{"section": "VBE", "serial_1": "0", "backend_1": "111.112.113.114",
|
||||
"server_1": "http://abc-ede.xyz.yyy.com:80", "ttl": "3600.000000"},
|
||||
field: "is_healthy",
|
||||
activeVcl: "VCL_1023_DIS_VOD_SHIELD_V1629295401194_1629295437531",
|
||||
customRegexps: []string{
|
||||
`^VBE\.(?P<_vcl>[\w\-]*)\.goto\.(?P<serial_1>[[:alnum:]])+\.` +
|
||||
`\((?P<backend_1>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\)\.\((?P<server_1>.*)\)\.\(ttl:(?P<ttl>\d*\.\d*.)*\)`,
|
||||
`^VBE\.(?P<_vcl>[\w\-]*)\.goto\.(?P<serial_2>[[:alnum:]])+\.` +
|
||||
`\((?P<backend_2>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\)\.\((?P<server_2>.*)\)\.\(ttl:(?P<ttl>\d*\.\d*.)*\)`,
|
||||
},
|
||||
},
|
||||
} {
|
||||
v := &Varnish{regexpsCompiled: defaultRegexps, Regexps: c.customRegexps}
|
||||
require.NoError(t, v.Init())
|
||||
vMetric := v.parseMetricV2(c.vName)
|
||||
require.Equal(t, c.activeVcl, vMetric.vclName)
|
||||
require.Equal(t, "varnish", vMetric.measurement, c.vName)
|
||||
require.Equal(t, c.field, vMetric.fieldName)
|
||||
require.Equal(t, c.tags, vMetric.tags)
|
||||
}
|
||||
}
|
||||
|
||||
func TestVersions(t *testing.T) {
|
||||
server := &Varnish{regexpsCompiled: defaultRegexps}
|
||||
require.NoError(t, server.Init())
|
||||
acc := &testutil.Accumulator{}
|
||||
|
||||
require.Empty(t, acc.Metrics)
|
||||
|
||||
type testConfig struct {
|
||||
jsonFile string
|
||||
activeReloadPrefix string
|
||||
size int
|
||||
}
|
||||
|
||||
for _, c := range []testConfig{
|
||||
{jsonFile: "varnish_types.json", activeReloadPrefix: "", size: 3},
|
||||
{jsonFile: "varnish6.2.1_reload.json", activeReloadPrefix: "reload_20210623_170621_31083", size: 374},
|
||||
{jsonFile: "varnish6.2.1_reload.json", activeReloadPrefix: "", size: 434},
|
||||
{jsonFile: "varnish6.6.json", activeReloadPrefix: "boot", size: 358},
|
||||
{jsonFile: "varnish4_4.json", activeReloadPrefix: "boot", size: 295},
|
||||
} {
|
||||
output, err := os.ReadFile("test_data/" + c.jsonFile)
|
||||
require.NoError(t, err)
|
||||
err = server.processMetricsV2(c.activeReloadPrefix, acc, bytes.NewBuffer(output))
|
||||
require.NoError(t, err)
|
||||
require.Len(t, acc.Metrics, c.size)
|
||||
for _, m := range acc.Metrics {
|
||||
require.NotEmpty(t, m.Fields)
|
||||
require.Equal(t, "varnish", m.Measurement)
|
||||
for field := range m.Fields {
|
||||
require.NotContains(t, field, "reload_")
|
||||
}
|
||||
for tag := range m.Tags {
|
||||
require.NotContains(t, tag, "reload_")
|
||||
}
|
||||
}
|
||||
acc.ClearMetrics()
|
||||
}
|
||||
}
|
||||
|
||||
func TestJsonTypes(t *testing.T) {
|
||||
json := `{
|
||||
"timestamp": "2021-06-23T17:06:37",
|
||||
"counters": {
|
||||
"XXX.floatTest": {
|
||||
"description": "floatTest",
|
||||
"flag": "c",
|
||||
"format": "d",
|
||||
"value": 123.45
|
||||
},
|
||||
"XXX.stringTest": {
|
||||
"description": "stringTest",
|
||||
"flag": "c",
|
||||
"format": "d",
|
||||
"value": "abc_def"
|
||||
},
|
||||
"XXX.intTest": {
|
||||
"description": "intTest",
|
||||
"flag": "c",
|
||||
"format": "d",
|
||||
"value": 12345
|
||||
},
|
||||
"XXX.uintTest": {
|
||||
"description": "intTest",
|
||||
"flag": "b",
|
||||
"format": "b",
|
||||
"value": 18446744073709551615
|
||||
}
|
||||
}}`
|
||||
exp := map[string]interface{}{
|
||||
"floatTest": 123.45,
|
||||
"stringTest": "abc_def",
|
||||
"intTest": int64(12345),
|
||||
"uintTest": uint64(18446744073709551615),
|
||||
}
|
||||
acc := &testutil.Accumulator{}
|
||||
v := &Varnish{
|
||||
run: fakeVarnishRunner(json),
|
||||
regexpsCompiled: defaultRegexps,
|
||||
Stats: []string{"*"},
|
||||
MetricVersion: 2,
|
||||
}
|
||||
require.NoError(t, v.Gather(acc))
|
||||
require.Len(t, acc.Metrics, len(exp))
|
||||
for _, metric := range acc.Metrics {
|
||||
require.Equal(t, "varnish", metric.Measurement)
|
||||
for fieldName, value := range metric.Fields {
|
||||
require.Equal(t, exp[fieldName], value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestVarnishAdmJson(t *testing.T) {
|
||||
admJSON, err := os.ReadFile("test_data/" + "varnishadm-200.json")
|
||||
require.NoError(t, err)
|
||||
activeVcl, err := getActiveVCLJson(bytes.NewBuffer(admJSON))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "boot-123", activeVcl)
|
||||
|
||||
admJSON, err = os.ReadFile("test_data/" + "varnishadm-reload.json")
|
||||
require.NoError(t, err)
|
||||
activeVcl, err = getActiveVCLJson(bytes.NewBuffer(admJSON))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "reload_20210723_091821_2056185", activeVcl)
|
||||
}
|
30
plugins/inputs/varnish/varnish_windows.go
Normal file
30
plugins/inputs/varnish/varnish_windows.go
Normal file
|
@ -0,0 +1,30 @@
|
|||
//go:build windows
|
||||
|
||||
package varnish
|
||||
|
||||
import (
|
||||
_ "embed"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
//go:embed sample.conf
|
||||
var sampleConfig string
|
||||
|
||||
// Varnish is the Windows stub of the varnish input plugin. The varnishstat
// and varnishadm binaries are not available on Windows, so this variant
// gathers nothing and only logs a warning at init.
type Varnish struct {
	Log telegraf.Logger `toml:"-"` // injected by telegraf; not user-configurable
}
|
||||
|
||||
// Init warns that the plugin is a no-op on this platform. It returns nil so
// that configurations shared across operating systems still load cleanly.
func (v *Varnish) Init() error {
	v.Log.Warn("current platform is not supported")
	return nil
}
|
||||
// SampleConfig returns the embedded example configuration.
func (*Varnish) SampleConfig() string { return sampleConfig }
|
||||
// Gather is a no-op on Windows and never produces metrics.
func (*Varnish) Gather(_ telegraf.Accumulator) error { return nil }
|
||||
|
||||
// init registers the stub so "varnish" remains a recognized input plugin
// name on Windows builds.
func init() {
	inputs.Add("varnish", func() telegraf.Input {
		return &Varnish{}
	})
}
|
Loading…
Add table
Add a link
Reference in a new issue