Adding upstream version 1.34.4.

Signed-off-by: Daniel Baumann <daniel@debian.org>
Daniel Baumann 2025-05-24 07:26:29 +02:00
parent e393c3af3f
commit 4978089aab
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
4963 changed files with 677545 additions and 0 deletions

config/testdata/addressbook.proto vendored Normal file
@@ -0,0 +1,28 @@
syntax = "proto3";
package addressbook;
message Person {
string name = 1;
int32 id = 2; // Unique ID number for this person.
string email = 3;
uint32 age = 4;
enum PhoneType {
MOBILE = 0;
HOME = 1;
WORK = 2;
}
message PhoneNumber {
string number = 1;
PhoneType type = 2;
}
repeated PhoneNumber phones = 5;
}
message AddressBook {
repeated Person people = 1;
repeated string tags = 2;
}

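Note: this schema is exercised by the xpath_protobuf parser fixtures later in this commit (xpath_protobuf_file = "testdata/addressbook.proto"). A minimal sketch of a plugin referencing it; the [[inputs.file]] wrapper and the input file name are illustrative, not part of this commit:

[[inputs.file]]
  files = ["addressbook.bin"]             # hypothetical protobuf-encoded input
  data_format = "xpath_protobuf"
  xpath_protobuf_file = "testdata/addressbook.proto"
  xpath_protobuf_type = "addressbook.AddressBook"
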
config/testdata/azure_monitor.toml vendored Normal file
@@ -0,0 +1,4 @@
[[outputs.azure_monitor]]
[[outputs.azure_monitor]]
namespace_prefix = ""

config/testdata/default_parser.toml vendored Normal file
@@ -0,0 +1,2 @@
[[inputs.file]]
files = ["metrics"]

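Note: this fixture sets no data_format, so it appears to exercise the default parser. If Telegraf's documented default of influx line protocol applies, the explicit equivalent would be:

[[inputs.file]]
  files = ["metrics"]
  data_format = "influx"   # assumed default, made explicit
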
@@ -0,0 +1,2 @@
[[inputs.exec]]
command = '/usr/bin/echo {"value": 42}'

@@ -0,0 +1,8 @@
[[inputs.file]]
pass = ["foo"]
fieldpass = ["bar"]
fieldinclude = ["baz"]
drop = ["foo"]
fielddrop = ["bar"]
fieldexclude = ["baz"]

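Note: this fixture lists what appear to be three generations of the same two filter options (pass/fieldpass/fieldinclude and drop/fielddrop/fieldexclude), presumably to test deprecated-alias handling. The current spellings, as used elsewhere in this commit, would be:

[[inputs.file]]
  fieldinclude = ["baz"]   # replaces pass / fieldpass
  fieldexclude = ["baz"]   # replaces drop / fielddrop
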
config/testdata/envvar_comments.toml vendored Normal file
@@ -0,0 +1,99 @@
# Telegraf Configuration
#
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
#
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
#
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
#
# Environment variables can be used anywhere in this config file, simply surround
# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"),
# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR})
[global_tags]
[agent]
interval = "10s"
round_interval = true
metric_batch_size = 1000
metric_buffer_limit = 10000
collection_jitter = "0s"
flush_interval = '10s'
flush_jitter = "0s"
precision = ""
hostname = ''
omit_hostname = false
[[outputs.influxdb]]
setting1 = '#'#test
setting2 = '''#'''#test
setting3 = "#"#test
setting4 = """#"""#test
wicked1 = "\""#test
wicked2 = """\""""#test
[[inputs.cpu]]
percpu = true
#totalcpu = true
# collect_cpu_time = false
## report_active = false
[[a.plugin]]
mylist = [
"value 1", # a good value
"value 2", # a better value
"value 3", "value 4",
'value5', """tagwith#value""",
] # Should work
[[some.stuff]]
a = 'not a #comment'
b = '''not a #comment'''
c = "not a #comment"
d = """not a #comment"""
e = '''not a #comment containing "quotes"'''
f = '''not a #comment containing 'quotes'?'''
g = """not a #comment containing "quotes"?"""
# Issue #14237
[[inputs.myplugin]]
value = '''This isn't a #comment.'''
[[processors.starlark]]
script = """
# Drop fields if they contain a string.
#
# Example Input:
# measurement,host=hostname a=1,b="somestring" 1597255410000000000
#
# Example Output:
# measurement,host=hostname a=1 1597255410000000000
def apply(metric):
for k, v in metric.fields.items():
if type(v) == "string":
metric.fields.pop(k)
return metric
"""
[[processors.starlark]]
script = '''
# Drop fields if they contain a string.
#
# Example Input:
# measurement,host=hostname a=1,b="somestring" 1597255410000000000
#
# Example Output:
# measurement,host=hostname a=1 1597255410000000000
def apply(metric):
for k, v in metric.fields.items():
if type(v) == "string":
metric.fields.pop(k)
return metric
'''

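Note: the header comment in the fixture above documents Telegraf's ${} environment-variable substitution. A minimal sketch of the quoting rule it states, reusing the variable names from that comment:

[agent]
  hostname = "${STR_VAR}"          # strings: the variable must be inside quotes
  metric_batch_size = ${INT_VAR}   # numbers: plain, no quotes
  omit_hostname = ${BOOL_VAR}      # booleans: plain, no quotes
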
@@ -0,0 +1,99 @@
[global_tags]
[agent]
interval = "10s"
round_interval = true
metric_batch_size = 1000
metric_buffer_limit = 10000
collection_jitter = "0s"
flush_interval = '10s'
flush_jitter = "0s"
precision = ""
hostname = ''
omit_hostname = false
[[outputs.influxdb]]
setting1 = '#'
setting2 = '''#'''
setting3 = "#"
setting4 = """#"""
wicked1 = "\""
wicked2 = """\""""
[[inputs.cpu]]
percpu = true
[[a.plugin]]
mylist = [
"value 1",
"value 2",
"value 3", "value 4",
'value5', """tagwith#value""",
]
[[some.stuff]]
a = 'not a #comment'
b = '''not a #comment'''
c = "not a #comment"
d = """not a #comment"""
e = '''not a #comment containing "quotes"'''
f = '''not a #comment containing 'quotes'?'''
g = """not a #comment containing "quotes"?"""
[[inputs.myplugin]]
value = '''This isn't a #comment.'''
[[processors.starlark]]
script = """
# Drop fields if they contain a string.
#
# Example Input:
# measurement,host=hostname a=1,b="somestring" 1597255410000000000
#
# Example Output:
# measurement,host=hostname a=1 1597255410000000000
def apply(metric):
for k, v in metric.fields.items():
if type(v) == "string":
metric.fields.pop(k)
return metric
"""
[[processors.starlark]]
script = '''
# Drop fields if they contain a string.
#
# Example Input:
# measurement,host=hostname a=1,b="somestring" 1597255410000000000
#
# Example Output:
# measurement,host=hostname a=1 1597255410000000000
def apply(metric):
for k, v in metric.fields.items():
if type(v) == "string":
metric.fields.pop(k)
return metric
'''

@@ -0,0 +1,2 @@
[[processors.processor]]
metricpass = '("state" in tags && tags.state == "on") || time > timestamp("2023-04-24T00:00:00Z")'

config/testdata/inline_table.toml vendored Normal file
@@ -0,0 +1,7 @@
[[outputs.http]]
headers = { Authorization = "Token $TOKEN",Content-Type = "application/json" }
taginclude = ["org_id"]
[[outputs.http]]
headers = { Authorization = "Token $TOKEN",Content-Type = "application/json" }
taginclude = ["org_id"]

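Note: the inline-table form of headers above is plain TOML and is equivalent to a standard sub-table, as spelled out in config/testdata/non_slice_slice.toml below. The two ways of writing the same headers:

[[outputs.http]]
  headers = { Content-Type = "application/json" }   # inline table

[[outputs.http]]
  [outputs.http.headers]                            # equivalent sub-table
    Content-Type = "application/json"
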
config/testdata/invalid_field.toml vendored Normal file
@@ -0,0 +1,2 @@
[[inputs.http_listener_v2]]
not_a_field = true

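Note: this and the following not_a_field fixtures presumably verify that unknown options are rejected both at the plugin level and inside parser sub-tables such as [inputs.parser.xpath]. For contrast, a valid http_listener_v2 option used later in this commit:

[[inputs.http_listener_v2]]
  write_timeout = "1s"   # a real option, unlike not_a_field
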
@@ -0,0 +1,5 @@
[[inputs.parser]]
data_format = "xpath_json"
[[inputs.parser.xpath]]
not_a_field = true

@@ -0,0 +1,5 @@
[[inputs.parser_func]]
data_format = "xpath_json"
[[inputs.parser_func.xpath]]
not_a_field = true

@@ -0,0 +1,2 @@
[[processors.processor]]
not_a_field = true

@@ -0,0 +1,3 @@
[[processors.processor_parser]]
not_a_field = true
data_format = "influx"

@@ -0,0 +1,5 @@
[[processors.processor_parser]]
data_format = "xpath_json"
[[processors.processor_parser.xpath]]
not_a_field = true

@@ -0,0 +1,3 @@
[[processors.processor_parserfunc]]
not_a_field = true
data_format = "influx"

@@ -0,0 +1,5 @@
[[inputs.parser_func]]
data_format = "xpath_json"
[[inputs.parser_func.xpath]]
not_a_field = true

@@ -0,0 +1,3 @@
[[processors.processor_parser]]
not_a_field = true
data_format = "influx"

@@ -0,0 +1,3 @@
[[processors.processor_parserfunc]]
not_a_field = true
data_format = "influx"

@@ -0,0 +1,3 @@
[[inputs.parser]]
not_a_field = true
data_format = "influx"

@@ -0,0 +1,3 @@
[[inputs.parser_func]]
not_a_field = true
data_format = "influx"

config/testdata/non_slice_slice.toml vendored Normal file
@@ -0,0 +1,4 @@
[[outputs.http]]
[outputs.http.headers]
Content-Type = "application/json"
taginclude = ["org_id"]

config/testdata/parsers_new.toml vendored Normal file
@@ -0,0 +1,65 @@
[[inputs.parser_test_new]]
data_format = "collectd"
[[inputs.parser_test_new]]
data_format = "csv"
csv_header_row_count = 42
[[inputs.parser_test_new]]
data_format = "dropwizard"
[[inputs.parser_test_new]]
data_format = "form_urlencoded"
[[inputs.parser_test_new]]
data_format = "graphite"
[[inputs.parser_test_new]]
data_format = "grok"
grok_patterns = ["%{COMBINED_LOG_FORMAT}"]
[[inputs.parser_test_new]]
data_format = "influx"
[[inputs.parser_test_new]]
data_format = "json"
[[inputs.parser_test_new]]
data_format = "json_v2"
[[inputs.parser_test_new.json_v2]]
[[inputs.parser_test_new.json_v2.field]]
path = ""
rename = ""
type = "int"
[[inputs.parser_test_new]]
data_format = "logfmt"
[[inputs.parser_test_new]]
data_format = "nagios"
[[inputs.parser_test_new]]
data_format = "prometheus"
[[inputs.parser_test_new]]
data_format = "prometheusremotewrite"
[[inputs.parser_test_new]]
data_format = "value"
[[inputs.parser_test_new]]
data_format = "wavefront"
[[inputs.parser_test_new]]
data_format = "xml"
[[inputs.parser_test_new]]
data_format = "xpath_json"
[[inputs.parser_test_new]]
data_format = "xpath_msgpack"
[[inputs.parser_test_new]]
data_format = "xpath_protobuf"
xpath_protobuf_file = "testdata/addressbook.proto"
xpath_protobuf_type = "addressbook.AddressBook"

@@ -0,0 +1,7 @@
[[processors.processor]]
[[processors.parser_test]]
[[processors.processor_parser]]
[[processors.processor_parserfunc]]

@@ -0,0 +1,16 @@
[[processors.parser_test]]
[[processors.processor_parser]]
order = 2
[[processors.processor_parserfunc]]
[[processors.processor]]
order = 1
[[processors.processor_parser]]
order = 3
[[processors.processor_parserfunc]]
order = 3

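Note: the order values in this fixture appear to pin each processor's position in the processing chain independent of where it is declared, with unordered processors presumably falling back to declaration order. A sketch of the idea (the processor names are hypothetical):

[[processors.second]]   # declared first, runs second
  order = 2
[[processors.first]]    # declared second, runs first
  order = 1
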
@@ -0,0 +1,9 @@
[[processors.parser_test]]
[[processors.processor_parser]]
[[processors.processor_parserfunc]]
[[processors.processor]]
order = 1

@@ -0,0 +1,65 @@
[[processors.parser_test]]
data_format = "collectd"
[[processors.parser_test]]
data_format = "csv"
csv_header_row_count = 42
[[processors.parser_test]]
data_format = "dropwizard"
[[processors.parser_test]]
data_format = "form_urlencoded"
[[processors.parser_test]]
data_format = "graphite"
[[processors.parser_test]]
data_format = "grok"
grok_patterns = ["%{COMBINED_LOG_FORMAT}"]
[[processors.parser_test]]
data_format = "influx"
[[processors.parser_test]]
data_format = "json"
[[processors.parser_test]]
data_format = "json_v2"
[[processors.parser_test.json_v2]]
[[processors.parser_test.json_v2.field]]
path = ""
rename = ""
type = "int"
[[processors.parser_test]]
data_format = "logfmt"
[[processors.parser_test]]
data_format = "nagios"
[[processors.parser_test]]
data_format = "prometheus"
[[processors.parser_test]]
data_format = "prometheusremotewrite"
[[processors.parser_test]]
data_format = "value"
[[processors.parser_test]]
data_format = "wavefront"
[[processors.parser_test]]
data_format = "xml"
[[processors.parser_test]]
data_format = "xpath_json"
[[processors.parser_test]]
data_format = "xpath_msgpack"
[[processors.parser_test]]
data_format = "xpath_protobuf"
xpath_protobuf_file = "testdata/addressbook.proto"
xpath_protobuf_type = "addressbook.AddressBook"

config/testdata/serializers_new.toml vendored Normal file
@@ -0,0 +1,32 @@
[[outputs.serializer_test_new]]
data_format = "carbon2"
[[outputs.serializer_test_new]]
data_format = "csv"
[[outputs.serializer_test_new]]
data_format = "graphite"
[[outputs.serializer_test_new]]
data_format = "influx"
[[outputs.serializer_test_new]]
data_format = "json"
[[outputs.serializer_test_new]]
data_format = "msgpack"
[[outputs.serializer_test_new]]
data_format = "nowmetric"
[[outputs.serializer_test_new]]
data_format = "prometheus"
[[outputs.serializer_test_new]]
data_format = "prometheusremotewrite"
[[outputs.serializer_test_new]]
data_format = "splunkmetric"
[[outputs.serializer_test_new]]
data_format = "wavefront"

config/testdata/serializers_old.toml vendored Normal file
@@ -0,0 +1,32 @@
[[outputs.serializer_test_old]]
data_format = "carbon2"
[[outputs.serializer_test_old]]
data_format = "csv"
[[outputs.serializer_test_old]]
data_format = "graphite"
[[outputs.serializer_test_old]]
data_format = "influx"
[[outputs.serializer_test_old]]
data_format = "json"
[[outputs.serializer_test_old]]
data_format = "msgpack"
[[outputs.serializer_test_old]]
data_format = "nowmetric"
[[outputs.serializer_test_old]]
data_format = "prometheus"
[[outputs.serializer_test_old]]
data_format = "prometheusremotewrite"
[[outputs.serializer_test_old]]
data_format = "splunkmetric"
[[outputs.serializer_test_old]]
data_format = "wavefront"

config/testdata/single_plugin.toml vendored Normal file
@@ -0,0 +1,11 @@
[[inputs.memcached]]
servers = ["localhost"]
namepass = ["metricname1"]
namedrop = ["metricname2"]
fieldinclude = ["some", "strings"]
fieldexclude = ["other", "stuff"]
interval = "5s"
[inputs.memcached.tagpass]
goodtag = ["mytag"]
[inputs.memcached.tagdrop]
badtag = ["othertag"]

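Note: [inputs.memcached.tagpass] and [inputs.memcached.tagdrop] are ordinary TOML sub-tables, so every key that follows such a header belongs to that table. Plugin options therefore have to appear before the first tagpass/tagdrop section, exactly as this fixture orders them:

[[inputs.memcached]]
  interval = "5s"              # plugin options first...
  [inputs.memcached.tagpass]   # ...keys after this header belong to tagpass
    goodtag = ["mytag"]
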
@@ -0,0 +1,35 @@
# Telegraf Configuration
#
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
#
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
#
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
#
# Environment variables can be used anywhere in this config file, simply surround
# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"),
# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR})
[[inputs.memcached]]
# this comment line will be ignored by the parser
servers = ["$MY_TEST_SERVER"]
namepass = ["metricname1", "ip_${MY_TEST_SERVER}_name"] # this comment will be ignored as well
namedrop = ["metricname2"]
fieldinclude = ["some", "strings"]
fieldexclude = ["other", "stuff"]
interval = "$TEST_INTERVAL"
##### this input is provided to test multiline strings
command = """
Raw command which may or may not contain # in it
# is unique""" # Multiline comment black starting with #
[inputs.memcached.tagpass]
goodtag = ["mytag", """tagwith#value""",
# comment in between array items
# should ignore "quotes" in comments
'''TagWithMultilineSyntax''', ## ignore this comment
] # hastag
[inputs.memcached.tagdrop]
badtag = ["othertag"]

@@ -0,0 +1,5 @@
[[inputs.memcached]]
servers = [
# A comment in the array
"localhost"
]

@@ -0,0 +1,13 @@
[[inputs.memcached]]
servers = ["localhost"]
namepass = ["metricname1"]
namepass_separator = "."
namedrop = ["metricname2"]
namedrop_separator = "."
fieldinclude = ["some", "strings"]
fieldexclude = ["other", "stuff"]
interval = "5s"
[inputs.memcached.tagpass]
goodtag = ["mytag"]
[inputs.memcached.tagdrop]
badtag = ["othertag"]

config/testdata/slice_comment.toml vendored Normal file
@@ -0,0 +1,5 @@
[[outputs.http]]
scopes = [
# comment
"test" # comment
]

config/testdata/special_types.key vendored Normal file
@@ -0,0 +1,5 @@
-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIFYI4Hm+jRW3OC3zvoWDaCig6E7X0Ql9l8elHPU3e5+toAoGCCqGSM49
AwEHoUQDQgAEGOw1XQ84Ai3GTZJ5o5u1yTFgA3VLZTTT0oHol06LRj5Md3oRy0MQ
QO5OhsAGGz16SYcPHf77aZmf2Of6ixYaLQ==
-----END EC PRIVATE KEY-----

config/testdata/special_types.pem vendored Normal file
@@ -0,0 +1,11 @@
-----BEGIN CERTIFICATE-----
MIIBjTCCATOgAwIBAgIRALJ1hlgDYCh5dWfr6tdrBEYwCgYIKoZIzj0EAwIwFDES
MBAGA1UEAxMJbG9jYWxob3N0MB4XDTIyMDExMjA3NTgyMloXDTIyMDExMzA3NTgy
MlowFDESMBAGA1UEAxMJbG9jYWxob3N0MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcD
QgAEGOw1XQ84Ai3GTZJ5o5u1yTFgA3VLZTTT0oHol06LRj5Md3oRy0MQQO5OhsAG
Gz16SYcPHf77aZmf2Of6ixYaLaNmMGQwDgYDVR0PAQH/BAQDAgeAMB0GA1UdJQQW
MBQGCCsGAQUFBwMBBggrBgEFBQcDAjAdBgNVHQ4EFgQUuKpGXAb1DaVSffJ/xuF6
FE31CC8wFAYDVR0RBA0wC4IJbG9jYWxob3N0MAoGCCqGSM49BAMCA0gAMEUCIHCb
m2phe189gftRke2Mo45lDsEAGaXsjA4lO/IOMo5lAiEA5k2X0bQfFhSfAcZPFtDI
iUwvC9SD3+CnzkP35O0jo+c=
-----END CERTIFICATE-----

config/testdata/special_types.toml vendored Normal file
@@ -0,0 +1,8 @@
[[inputs.http_listener_v2]]
write_timeout = "1s"
max_body_size = "1MiB"
paths = [ """
/path/
""" ]
tls_cert = """./testdata/special_types.pem"""
tls_key = '''./testdata/special_types.key'''

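Note: the triple-quoted values in the fixture above are ordinary TOML multiline strings with no newlines inside, so tls_cert and tls_key hold exactly the same paths as the single-line forms; basic ("""...""") strings process escape sequences while literal ('''...''') strings do not:

tls_cert = "./testdata/special_types.pem"   # same value as the """...""" form
tls_key = './testdata/special_types.key'    # same value as the '''...''' form
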
@@ -0,0 +1,42 @@
[[inputs.statetest]]
[[inputs.statetest]]
servers = ["myserver.com", "myserver.org"]
[[inputs.statetest]]
servers = ["myserver.org", "myserver.com"]
[[inputs.statetest]]
servers = ["myserver.org", "myserver.com"]
port = 0
[[inputs.statetest]]
servers = ["myserver.org", "myserver.com"]
port = 80
method = "strange"
[inputs.statetest.params]
a = "foo"
b = "bar"
[[inputs.statetest]]
servers = ["myserver.org", "myserver.com"]
port = 80
method = "strange"
setup = [
{name="alpha", factor=3.1415, enabled=true, bits=[1,2,3]}
]
[inputs.statetest.params]
a = "foo"
b = "bar"
[[inputs.statetest]]
servers = ["myserver.org", "myserver.com"]
port = 80
method = "strange"
setup = [
{name="alpha", factor=3.1415, enabled=true, bits=[1,2,3]},
{name="beta", factor=2.71828, enabled=true, bits=[1,2,3]}
]
[inputs.statetest.params]
a = "foo"
b = "bar"

@@ -0,0 +1,60 @@
[[inputs.statetest]]
servers = ["myserver.org", "myserver.com"]
port = 80
method = "strange"
setup = [
{name="alpha", factor=3.1415, enabled=true, bits=[1,2,3]},
{name="beta", factor=2.71828, enabled=true, bits=[1,2,3]}
]
[inputs.statetest.params]
a = "foo"
b = "bar"
[[inputs.statetest]]
## What a wonderful world...
servers = ["myserver.org", "myserver.com"]
port = 80
method = "strange"
setup = [
{name="alpha", factor=3.1415, enabled=true, bits=[1,2,3]},
{name="beta", factor=2.71828, enabled=true, bits=[1,2,3]}
]
[inputs.statetest.params]
a = "foo"
b = "bar"
[[inputs.statetest]]
servers = ["myserver.org", "myserver.com"]
method = "strange"
setup = [
{name="alpha", factor=3.1415, enabled=true, bits=[1,2,3]},
{name="beta", factor=2.71828, enabled=true, bits=[1,2,3]}
]
port = 80
[inputs.statetest.params]
a = "foo"
b = "bar"
[[inputs.statetest]]
servers = ["myserver.org", "myserver.com"]
port = 80
method = "strange"
setup = [
{name="alpha", factor=3.1415, enabled=true, bits=[1,2,3]},
{name="beta", factor=2.71828, enabled=true, bits=[1,2,3]}
]
[inputs.statetest.params]
b = "bar"
a = "foo"
[[inputs.statetest]]
method = "strange"
servers = ["myserver.org", "myserver.com"]
port = 80
setup = [
{name="alpha", factor=3.1415, enabled=true, bits=[1,2,3]},
{name="beta", factor=2.71828, enabled=true, bits=[1,2,3]}
]
[inputs.statetest.params]
a = "foo"
b = "bar"

@@ -0,0 +1,17 @@
[[inputs.statetest]]
servers = ["myserverA.org"]
port = 42
method = "strange"
[[inputs.statetest]]
servers = ["myserverB.org"]
port = 23
method = "strange"
[[inputs.statetest]]
servers = ["myserverC.org"]
port = 80
method = "strange"
[inputs.statetest.params]
a = "foo"
b = "bar"

@@ -0,0 +1,8 @@
[[processors.statetest]]
option = "foo"
[[processors.statetest]]
option = "bar"
[[processors.statetest]]
option = "captain obvious"

config/testdata/subconfig/exec.conf vendored Normal file
@@ -0,0 +1,4 @@
[[inputs.exec]]
# the command to run
command = "/usr/bin/myothercollector --foo=bar"
name_suffix = "_myothercollector"

@@ -0,0 +1,11 @@
[[inputs.memcached]]
servers = ["192.168.1.1"]
namepass = ["metricname1"]
namedrop = ["metricname2"]
pass = ["some", "strings"]
drop = ["other", "stuff"]
interval = "5s"
[inputs.memcached.tagpass]
goodtag = ["mytag"]
[inputs.memcached.tagdrop]
badtag = ["othertag"]

@@ -0,0 +1,2 @@
[[inputs.procstat]]
pid_file = "/var/run/grafana-server.pid"

config/testdata/telegraf-agent.toml vendored Normal file
@@ -0,0 +1,297 @@
# Telegraf configuration
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs.
# Even if a plugin has no configuration, it must be declared in here
# to be active. Declaring a plugin means just specifying the name
# as a section with no variables. To deactivate a plugin, comment
# out the name and any variables.
# Use 'telegraf -config telegraf.toml -test' to see what metrics a config
# file would generate.
# One rule that plugins conform to is wherever a connection string
# can be passed, the values '' and 'localhost' are treated specially.
# They indicate to the plugin to use their own builtin configuration to
# connect to the local system.
# NOTE: The configuration has a few required parameters. They are marked
# with 'required'. Be sure to edit those to make this configuration work.
# Tags can also be specified via a normal map, but only one form at a time:
[global_tags]
dc = "us-east-1"
# Configuration for telegraf agent
[agent]
# Default data collection interval for all plugins
interval = "10s"
# run telegraf in debug mode
debug = false
# Override default hostname, if empty use os.Hostname()
hostname = ""
###############################################################################
# OUTPUTS #
###############################################################################
# Configuration for influxdb server to send metrics to
[[outputs.influxdb]]
# The full HTTP endpoint URL for your InfluxDB instance
# Multiple urls can be specified for InfluxDB cluster support. Server to
# write to will be randomly chosen each interval.
urls = ["http://localhost:8086"] # required.
# The target database for metrics. This database must already exist
database = "telegraf" # required.
[[outputs.influxdb]]
urls = ["udp://localhost:8089"]
database = "udp-telegraf"
# Configuration for the Kafka server to send metrics to
[[outputs.kafka]]
# URLs of kafka brokers
brokers = ["localhost:9092"]
# Kafka topic for producer messages
topic = "telegraf"
# Telegraf tag to use as a routing key
# ie, if this tag exists, its value will be used as the routing key
routing_tag = "host"
###############################################################################
# PLUGINS #
###############################################################################
# Read Apache status information (mod_status)
[[inputs.apache]]
# An array of Apache status URI to gather stats.
urls = ["http://localhost/server-status?auto"]
# Read metrics about cpu usage
[[inputs.cpu]]
# Whether to report per-cpu stats or not
percpu = true
# Whether to report total system cpu stats or not
totalcpu = true
# Comment this line if you want the raw CPU time metrics
fieldexclude = ["cpu_time"]
# Read metrics about disk usage by mount point
[[inputs.diskio]]
# no configuration
# Read metrics from one or many disque servers
[[inputs.disque]]
# An array of URI to gather stats about. Specify an ip or hostname
# with optional port and password. ie disque://localhost, disque://10.10.3.33:18832,
# 10.0.0.1:10000, etc.
#
# If no servers are specified, then localhost is used as the host.
servers = ["localhost"]
# Read stats from one or more Elasticsearch servers or clusters
[[inputs.elasticsearch]]
# specify a list of one or more Elasticsearch servers
servers = ["http://localhost:9200"]
# set local to false when you want to read the indices stats from all nodes
# within the cluster
local = true
# Read flattened metrics from one or more commands that output JSON to stdout
[[inputs.exec]]
# the command to run
command = "/usr/bin/mycollector --foo=bar"
name_suffix = "_mycollector"
# Read metrics of haproxy, via socket or csv stats page
[[inputs.haproxy]]
# An array of address to gather stats about. Specify an ip on hostname
# with optional port. ie localhost, 10.10.3.33:1936, etc.
#
# If no servers are specified, then default to 127.0.0.1:1936
servers = ["http://myhaproxy.com:1936", "http://anotherhaproxy.com:1936"]
# Or you can also use local socket(not work yet)
# servers = ["socket:/run/haproxy/admin.sock"]
# Read flattened metrics from one or more JSON HTTP endpoints
[[inputs.http]]
# a name for the service being polled
name_override = "webserver_stats"
# URL of each server in the service's cluster
urls = [
"http://localhost:9999/stats/",
"http://localhost:9998/stats/",
]
# HTTP method to use (case-sensitive)
# method = "GET"
data_format = "json"
# Read metrics about disk IO by device
[[inputs.diskio]]
# no configuration
# read metrics from a Kafka 0.9+ topic
[[inputs.kafka_consumer]]
## kafka brokers
brokers = ["localhost:9092"]
## topic(s) to consume
topics = ["telegraf"]
## the name of the consumer group
consumer_group = "telegraf_metrics_consumers"
## Offset (must be either "oldest" or "newest")
offset = "oldest"
# Read metrics from a LeoFS Server via SNMP
[[inputs.leofs]]
# An array of URI to gather stats about LeoFS.
# Specify an ip or hostname with port. ie 127.0.0.1:4020
#
# If no servers are specified, then 127.0.0.1 is used as the host and 4020 as the port.
servers = ["127.0.0.1:4021"]
# Read metrics about memory usage
[[inputs.mem]]
# no configuration
# Read metrics from one or many memcached servers
[[inputs.memcached]]
# An array of address to gather stats about. Specify an ip on hostname
# with optional port. ie localhost, 10.0.0.1:11211, etc.
#
# If no servers are specified, then localhost is used as the host.
servers = ["localhost"]
# Telegraf plugin for gathering metrics from N Mesos masters
[[inputs.mesos]]
# Timeout, in ms.
timeout = 100
# A list of Mesos masters, default value is localhost:5050.
masters = ["localhost:5050"]
# Metrics groups to be collected, by default, all enabled.
master_collections = ["resources","master","system","slaves","frameworks","messages","evqueue","registrar"]
# Read metrics from one or many MongoDB servers
[[inputs.mongodb]]
# An array of URI to gather stats about. Specify an ip or hostname
# with optional port add password. ie mongodb://user:auth_key@10.10.3.30:27017,
# mongodb://10.10.3.33:18832, 10.0.0.1:10000, etc.
#
# If no servers are specified, then 127.0.0.1 is used as the host and 27107 as the port.
servers = ["127.0.0.1:27017"]
# Read metrics from one or many mysql servers
[[inputs.mysql]]
# specify servers via a url matching:
# [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
# e.g.
# servers = ["root:root@http://10.0.0.18/?tls=false"]
# servers = ["root:passwd@tcp(127.0.0.1:3306)/"]
#
# If no servers are specified, then localhost is used as the host.
servers = ["localhost"]
# Read metrics about network interface usage
[[inputs.net]]
# By default, telegraf gathers stats from any up interface (excluding loopback)
# Setting interfaces will tell it to gather these explicit interfaces,
# regardless of status.
#
# interfaces = ["eth0", ... ]
# Read Nginx's basic status information (ngx_http_stub_status_module)
[[inputs.nginx]]
# An array of Nginx stub_status URI to gather stats.
urls = ["http://localhost/status"]
# Ping given url(s) and return statistics
[[inputs.ping]]
# urls to ping
urls = ["www.google.com"] # required
# number of pings to send (ping -c <COUNT>)
count = 1 # required
# interval, in s, at which to ping. 0 == default (ping -i <PING_INTERVAL>)
ping_interval = 0.0
# ping timeout, in s. 0 == no timeout (ping -t <TIMEOUT>)
timeout = 0.0
# interface to send ping from (ping -I <INTERFACE>)
interface = ""
# Read metrics from one or many postgresql servers
[[inputs.postgresql]]
# specify address via a url matching:
# postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]
# or a simple string:
# host=localhost user=pqgotest password=... sslmode=... dbname=app_production
#
# All connection parameters are optional. By default, the host is localhost
# and the user is the currently running user. For localhost, we default
# to sslmode=disable as well.
#
# Without the dbname parameter, the driver will default to a database
# with the same name as the user. This dbname is just for instantiating a
# connection with the server and doesn't restrict the databases we are trying
# to grab metrics for.
#
address = "sslmode=disable"
# A list of databases to pull metrics about. If not specified, metrics for all
# databases are gathered.
# databases = ["app_production", "blah_testing"]
# [[postgresql.servers]]
# address = "influx@remoteserver"
# Read metrics from one or many prometheus clients
[[inputs.prometheus]]
# An array of urls to scrape metrics from.
urls = ["http://localhost:9100/metrics"]
# Read metrics from one or many RabbitMQ servers via the management API
[[inputs.rabbitmq]]
# Specify servers via an array of tables
# name = "rmq-server-1" # optional tag
# url = "http://localhost:15672"
# username = "guest"
# password = "guest"
# A list of nodes to pull metrics about. If not specified, metrics for
# all nodes are gathered.
# nodes = ["rabbit@node1", "rabbit@node2"]
# Read metrics from one or many redis servers
[[inputs.redis]]
# An array of URI to gather stats about. Specify an ip or hostname
# with optional port add password. ie redis://localhost, redis://10.10.3.33:18832,
# 10.0.0.1:10000, etc.
#
# If no servers are specified, then localhost is used as the host.
servers = ["localhost"]
# Read metrics from one or many RethinkDB servers
[[inputs.rethinkdb]]
# An array of URI to gather stats about. Specify an ip or hostname
# with optional port add password. ie rethinkdb://user:auth_key@10.10.3.30:28105,
# rethinkdb://10.10.3.33:18832, 10.0.0.1:10000, etc.
#
# If no servers are specified, then 127.0.0.1 is used as the host and 28015 as the port.
servers = ["127.0.0.1:28015"]
# Read metrics about swap memory usage
[[inputs.swap]]
# no configuration
# Read metrics about system load & uptime
[[inputs.system]]
# no configuration

config/testdata/wrong_cert_path.toml vendored Normal file
@@ -0,0 +1,5 @@
[[inputs.http_listener_v2]]
write_timeout = "1s"
max_body_size = "1MiB"
tls_cert = "invalid.pem"
tls_key = "invalid.key"

config/testdata/wrong_field_type.toml vendored Normal file
@@ -0,0 +1,2 @@
[[inputs.http_listener_v2]]
port = "80"

@@ -0,0 +1,2 @@
[[inputs.http_listener_v2]]
methods = "POST"
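
Note: the two wrong_field_type fixtures above supply a string where an integer is expected (port) and a bare string where an array is expected (methods), presumably to test type-mismatch errors. The well-typed counterparts would be:

[[inputs.http_listener_v2]]
  port = 80            # integer, not "80"
  methods = ["POST"]   # array of strings, not a bare string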