Adding upstream version 1.34.4.
Signed-off-by: Daniel Baumann <daniel@debian.org>
parent e393c3af3f
commit 4978089aab
4963 changed files with 677545 additions and 0 deletions
60  plugins/outputs/cloudwatch_logs/sample.conf  Normal file
@@ -0,0 +1,60 @@
# Configuration for AWS CloudWatchLogs output.
[[outputs.cloudwatch_logs]]
  ## The region is the Amazon region that you wish to connect to.
  ## Examples include but are not limited to:
  ## - us-west-1
  ## - us-west-2
  ## - us-east-1
  ## - ap-southeast-1
  ## - ap-southeast-2
  ## ...
  region = "us-east-1"

  ## Amazon Credentials
  ## Credentials are loaded in the following order
  ## 1) Web identity provider credentials via STS if role_arn and
  ##    web_identity_token_file are specified
  ## 2) Assumed credentials via STS if role_arn is specified
  ## 3) explicit credentials from 'access_key' and 'secret_key'
  ## 4) shared profile from 'profile'
  ## 5) environment variables
  ## 6) shared credentials file
  ## 7) EC2 Instance Profile
  #access_key = ""
  #secret_key = ""
  #token = ""
  #role_arn = ""
  #web_identity_token_file = ""
  #role_session_name = ""
  #profile = ""
  #shared_credential_file = ""
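  ## For example, to assume a role via STS web identity (the ARN and token path
  ## below are placeholders for illustration, not defaults), one might set:
  # role_arn = "arn:aws:iam::123456789012:role/telegraf-cloudwatch-logs"
  # web_identity_token_file = "/var/run/secrets/eks.amazonaws.com/serviceaccount/token"
  # role_session_name = "telegraf"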
  ## Endpoint to make requests against; the correct endpoint is automatically
  ## determined and this option should only be set if you wish to override the
  ## default, e.g. endpoint_url = "http://localhost:8000"
  # endpoint_url = ""

  ## CloudWatch log group. Must be created in AWS CloudWatch Logs upfront!
  ## For example, you can specify the name of the k8s cluster here to group logs
  ## from all clusters in one place
  log_group = "my-group-name"
  ## Log stream in log group
  ## Either a log stream name or a reference to a metric attribute
  ## (tag:<TAG_NAME> or field:<FIELD_NAME>) from which it can be parsed.
  ## If the log stream does not exist, it will be created. Since AWS does not
  ## automatically delete log streams with expired log entries (i.e. empty log
  ## streams), you need to put appropriate housekeeping in place
  ## (https://forums.aws.amazon.com/thread.jspa?threadID=178855)
  log_stream = "tag:location"
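  ## For example, when consuming container logs from the docker_log input, one
  ## stream per container can be produced by keying on its name tag (this
  ## assumes the metrics carry a container_name tag):
  # log_stream = "tag:container_name"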
  ## Source of log data - metric name
  ## Specify the name of the metric from which the log data should be
  ## retrieved. E.g., if you are using the docker_log plugin to stream logs from
  ## containers, then specify log_data_metric_name = "docker_log"
  log_data_metric_name = "docker_log"

  ## Specify from which metric attribute the log data should be retrieved:
  ## tag:<TAG_NAME> or field:<FIELD_NAME>.
  ## E.g., if you are using the docker_log plugin to stream logs from containers,
  ## then specify log_data_source = "field:message"
  log_data_source = "field:message"
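## A minimal end-to-end sketch pairing this output with the docker_log input
## (the input settings and tag/field names below are assumptions to illustrate
## the wiring, not values this plugin mandates):
# [[inputs.docker_log]]
#   endpoint = "unix:///var/run/docker.sock"
#
# [[outputs.cloudwatch_logs]]
#   region = "us-east-1"
#   log_group = "my-group-name"
#   log_stream = "tag:container_name"
#   log_data_metric_name = "docker_log"
#   log_data_source = "field:message"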