# Read metrics from one or many prometheus clients
[[inputs.prometheus]]
  ## An array of urls to scrape metrics from.
  urls = ["http://localhost:9100/metrics"]

  ## Metric version controls the mapping from Prometheus metrics into Telegraf metrics.
  ## See "Metric Format Configuration" in plugins/inputs/prometheus/README.md for details.
  ## Valid options: 1, 2
  # metric_version = 1

  ## URL tag name (tag containing the scraped URL; optional, default is "url")
  # url_tag = "url"

  ## Whether the timestamp of the scraped metrics will be ignored.
  ## If set to true, the gather time will be used.
  # ignore_timestamp = false

  ## Override content-type of the returned message
  ## Available options are for prometheus:
  ##   text, protobuf-delimiter, protobuf-compact, protobuf-text,
  ## and for openmetrics:
  ##   openmetrics-text, openmetrics-protobuf
  ## By default the content-type of the response is used.
  # content_type_override = ""

  ## An array of Kubernetes services to scrape metrics from.
  # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]

  ## Kubernetes config file to create client from.
  # kube_config = "/path/to/kubernetes.config"

  ## Scrape Pods
  ## Enable scraping of k8s pods. Further settings as to which pods to scrape
  ## are determined by the 'method' option below. When enabled, the default is
  ## to use annotations to determine whether to scrape or not.
  # monitor_kubernetes_pods = false

  ## Scrape Pods Method
  ## annotations: default, looks for specific pod annotations documented below
  ## settings: only look for pods matching the settings provided, not
  ##           annotations
  ## settings+annotations: looks at pods that match annotations using the user
  ##                       defined settings
  # monitor_kubernetes_pods_method = "annotations"

  ## Scrape Pods 'annotations' method options
  ## If the method is set to 'annotations' or 'settings+annotations', these
  ## annotation flags are looked for:
  ## - prometheus.io/scrape: Required to enable scraping for this pod. Can also
  ##   use 'prometheus.io/scrape=false' annotation to opt-out entirely.
  ## - prometheus.io/scheme: If the metrics endpoint is secured then you will
  ##   need to set this to 'https' and most likely set the TLS config
  ## - prometheus.io/path: If the metrics path is not /metrics, define it with
  ##   this annotation
  ## - prometheus.io/port: If the port is not 9102, use this annotation

  ## Scrape Pods 'settings' method options
  ## When using 'settings' or 'settings+annotations', the default values for
  ## annotations can be modified with the following options:
  # monitor_kubernetes_pods_scheme = "http"
  # monitor_kubernetes_pods_port = "9102"
  # monitor_kubernetes_pods_path = "/metrics"

  ## Get the list of pods to scrape with either the scope of
  ## - cluster: the kubernetes watch api (default, no need to specify)
  ## - node: the local cadvisor api; for scalability. Note that the config
  ##         node_ip or the environment variable NODE_IP must be set to the host IP.
  # pod_scrape_scope = "cluster"

  ## Only for node scrape scope: node IP of the node that telegraf is running on.
  ## Either this config or the environment variable NODE_IP must be set.
  # node_ip = "10.180.1.1"

  ## Only for node scrape scope: interval in seconds for how often to get the
  ## updated pod list for scraping.
  ## Default is 60 seconds.
  # pod_scrape_interval = 60

  ## Content length limit
  ## When set, telegraf will drop responses with length larger than the configured value.
  ## Default is "0KB" which means unlimited.
  # content_length_limit = "0KB"

  ## Restricts Kubernetes monitoring to a single namespace
  ##   ex: monitor_kubernetes_pods_namespace = "default"
  # monitor_kubernetes_pods_namespace = ""

  ## The name of the label for the pod that is being scraped.
  ## Default is 'namespace', but this can conflict with metrics that have the label 'namespace'.
  # pod_namespace_label_name = "namespace"

  # label selector to target pods which have the label
  # kubernetes_label_selector = "env=dev,app=nginx"

  # field selector to target pods
  # eg. To scrape pods on a specific node
  # kubernetes_field_selector = "spec.nodeName=$HOSTNAME"

  ## Filter which pod annotations and labels will be added to metric tags
  #
  # pod_annotation_include = ["annotation-key-1"]
  # pod_annotation_exclude = ["exclude-me"]
  # pod_label_include = ["label-key-1"]
  # pod_label_exclude = ["exclude-me"]

  # cache refresh interval to set the interval for re-sync of the pods list.
  # Default is 60 minutes.
  # cache_refresh_interval = 60

  ## Use bearer token for authorization. ('bearer_token' takes priority)
  # bearer_token = "/path/to/bearer/token"
  ## OR
  # bearer_token_string = "abc_123"

  ## HTTP Basic Authentication username and password. ('bearer_token' and
  ## 'bearer_token_string' take priority)
  # username = ""
  # password = ""

  ## Optional custom HTTP headers
  # http_headers = {"X-Special-Header" = "Special-Value"}

  ## Specify timeout duration for slower prometheus clients (default is 5s)
  # timeout = "5s"

  ## This option is now used by the HTTP client to set the header response
  ## timeout, not the overall HTTP timeout.
  # response_timeout = "5s"

  ## HTTP Proxy support
  # use_system_proxy = false
  # http_proxy_url = ""

  ## Optional TLS Config
  # tls_ca = "/path/to/cafile"
  # tls_cert = "/path/to/certfile"
  # tls_key = "/path/to/keyfile"
  ## Use TLS but skip chain & host verification
  # insecure_skip_verify = false
  ## Use the given name as the SNI server name on each URL
  # tls_server_name = "myhost.example.org"
  ## TLS renegotiation method, choose from "never", "once", "freely"
  # tls_renegotiation_method = "never"
  ## Enable/disable TLS
  ## Set to true/false to enforce TLS being enabled/disabled. If not set,
  ## enable TLS only if any of the other options are specified.
  # tls_enable = true

  ## This option allows you to report the status of prometheus requests.
  # enable_request_metrics = false

  ## Scrape Services available in Consul Catalog
  # [inputs.prometheus.consul]
  #   enabled = true
  #   agent = "http://localhost:8500"
  #   query_interval = "5m"
  #
  #   [[inputs.prometheus.consul.query]]
  #     name = "a service name"
  #     tag = "a service tag"
  #     url = 'http://{{if ne .ServiceAddress ""}}{{.ServiceAddress}}{{else}}{{.Address}}{{end}}:{{.ServicePort}}/{{with .ServiceMeta.metrics_path}}{{.}}{{else}}metrics{{end}}'
  #     [inputs.prometheus.consul.query.tags]
  #       host = "{{.Node}}"

  ## Control pod scraping based on pod namespace annotations
  ## Pass and drop here act like tagpass and tagdrop, but instead
  ## of filtering metrics they filter pod candidates for scraping
  # [inputs.prometheus.namespace_annotation_pass]
  #   annotation_key = ["value1", "value2"]
  # [inputs.prometheus.namespace_annotation_drop]
  #   some_annotation_key = ["dont-scrape"]
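  ## Example: a minimal static-scrape configuration. This is an illustrative
  ## sketch, not part of the defaults above; the second URL and the timeout
  ## value are assumptions chosen for demonstration.
  # [[inputs.prometheus]]
  #   ## Scrape a local node_exporter plus a second, hypothetical endpoint
  #   urls = [
  #     "http://localhost:9100/metrics",
  #     "http://app.example.com:9273/metrics"
  #   ]
  #   ## Use the newer metric mapping described in the README
  #   metric_version = 2
  #   ## Allow extra time for slow exporters
  #   timeout = "10s"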
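  ## Example: annotation-driven Kubernetes pod scraping restricted to a single
  ## namespace and label selector. An illustrative sketch only; the namespace,
  ## selector, and label-name values below are assumptions, not defaults. Pods
  ## are still selected via the 'prometheus.io/scrape' annotation documented above.
  # [[inputs.prometheus]]
  #   monitor_kubernetes_pods = true
  #   monitor_kubernetes_pods_method = "annotations"
  #   ## Only watch pods in one (example) namespace
  #   monitor_kubernetes_pods_namespace = "monitoring"
  #   ## Only consider pods carrying an example label
  #   kubernetes_label_selector = "app=nginx"
  #   ## Rename the pod namespace tag to avoid clashing with a metric label
  #   pod_namespace_label_name = "pod_namespace"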