1
0
Fork 0

Adding upstream version 1.34.4.

Signed-off-by: Daniel Baumann <daniel@debian.org>
This commit is contained in:
Daniel Baumann 2025-05-24 07:26:29 +02:00
parent e393c3af3f
commit 4978089aab
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
4963 changed files with 677545 additions and 0 deletions

885
.circleci/config.yml Normal file
View file

@ -0,0 +1,885 @@
# CircleCI pipeline for Telegraf: lint, test (Linux/386/mac/Windows),
# per-architecture packaging, signing, and nightly/release publishing.
# NOTE(review): indentation was lost in the source capture; structure below is
# reconstructed canonically for CircleCI 2.1 — verify against upstream.
version: 2.1

orbs:
  win: circleci/windows@5.0.0
  aws-cli: circleci/aws-cli@3.1.1

executors:
  # Linux build container used by most jobs.
  telegraf-ci:
    working_directory: '/go/src/github.com/influxdata/telegraf'
    resource_class: large
    docker:
      - image: 'quay.io/influxdb/telegraf-ci:1.24.3'
    environment:
      GOFLAGS: -p=4
  # macOS (Apple Silicon) executor for darwin builds/signing.
  mac:
    working_directory: '~/go/src/github.com/influxdata/telegraf'
    resource_class: macos.m1.medium.gen1
    macos:
      xcode: 15.4.0
    environment:
      HOMEBREW_NO_AUTO_UPDATE: 1
      GOFLAGS: -p=4

commands:
  # Halts the job early when no relevant files changed.
  check-changed-files-or-halt:
    steps:
      - run: ./scripts/check-file-changes.sh
  # Runs the Go test suite via gotestsum for the given os/arch.
  test-go:
    parameters:
      os:
        type: string
        default: "linux"
      arch:
        type: string
        default: "amd64"
      gotestsum:
        type: string
        default: "gotestsum"
    steps:
      - run: ./scripts/install_gotestsum.sh << parameters.os >> << parameters.gotestsum >>
      # The race detector is unsupported on 386.
      - unless:
          condition:
            equal: [ "386", << parameters.arch >> ]
          steps:
            - run: echo 'export RACE="-race"' >> $BASH_ENV
      - when:
          condition:
            equal: [ windows, << parameters.os >> ]
          steps:
            - run: echo 'export CGO_ENABLED=1' >> $BASH_ENV
      - when:
          condition:
            equal: [ darwin, << parameters.os >> ]
          steps:
            - run: echo 'export RACE="$RACE -ldflags=-extldflags=-Wl,-ld_classic"' >> $BASH_ENV
      - run: |
          GOARCH=<< parameters.arch >> ./<< parameters.gotestsum >> -- ${RACE} -short ./...
  # Builds distribution packages for one target "type" (architecture/platform).
  package-build:
    parameters:
      type:
        type: string
        default: ""
      nightly:
        type: boolean
        default: false
    steps:
      - checkout
      - check-changed-files-or-halt
      - attach_workspace:
          at: '/go'
      - when:
          condition:
            equal: [ windows, << parameters.type >> ]
          steps:
            - run: go install github.com/josephspurrier/goversioninfo/cmd/goversioninfo@v1.4.0
      - when:
          condition: << parameters.nightly >>
          steps:
            - run:
                command: 'NIGHTLY=1 make package include_packages="$(make << parameters.type >>)"'
                no_output_timeout: 30m
      - unless:
          condition:
            or:
              - << parameters.nightly >>
          steps:
            - run:
                command: 'make package include_packages="$(make << parameters.type >>)"'
                no_output_timeout: 30m
      - store_artifacts:
          path: './build/dist'
          destination: 'build/dist'
      - persist_to_workspace:
          root: './build'
          paths:
            - 'dist'

jobs:
  lint-linux:
    executor: telegraf-ci
    steps:
      - checkout
      - run: ./scripts/make_docs.sh
      - check-changed-files-or-halt
      - run: 'make deps'
      - run: 'make tidy'
      - run: 'make check'
      - run: 'make check-deps'
      - run:
          name: "Install golangci-lint"
          command: go install github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.1.2
      - run:
          name: "golangci-lint/Linux"
          # There are only 4 vCPUs available for this executor, so use only 4 instead of the default number
          # (the OS may report the number of CPUs on the host instead of the number of CPUs available to the guest).
          command: GOGC=80 GOMEMLIMIT=6144MiB /go/bin/golangci-lint run --verbose --timeout=30m --concurrency 4
          no_output_timeout: 30m
  lint-macos:
    executor: telegraf-ci
    steps:
      - checkout
      - check-changed-files-or-halt
      - run:
          name: "Install golangci-lint"
          command: go install github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.1.2
      - run:
          name: "golangci-lint/macOS"
          # There are only 4 vCPUs available for this executor, so use only 4 instead of the default number
          # (the OS may report the number of CPUs on the host instead of the number of CPUs available to the guest).
          command: GOGC=80 GOMEMLIMIT=6144MiB GOOS=darwin /go/bin/golangci-lint run --verbose --timeout=30m --concurrency 4
          no_output_timeout: 30m
  lint-windows:
    executor: telegraf-ci
    steps:
      - checkout
      - check-changed-files-or-halt
      - run:
          name: "Install golangci-lint"
          command: go install github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.1.2
      - run:
          name: "golangci-lint/Windows"
          # There are only 4 vCPUs available for this executor, so use only 4 instead of the default number
          # (the OS may report the number of CPUs on the host instead of the number of CPUs available to the guest).
          command: GOGC=80 GOMEMLIMIT=6144MiB GOOS=windows /go/bin/golangci-lint run --verbose --timeout=30m --concurrency 4
          no_output_timeout: 30m
  test-go-linux:
    executor: telegraf-ci
    steps:
      - checkout
      - check-changed-files-or-halt
      - test-go
  test-go-linux-386:
    executor: telegraf-ci
    steps:
      - checkout
      - check-changed-files-or-halt
      - run: 'GOARCH=386 make deps'
      - run: 'GOARCH=386 make tidy'
      - run: 'GOARCH=386 make check'
      - test-go:
          arch: "386"
  test-integration:
    machine:
      image: ubuntu-2204:current
    resource_class: large
    steps:
      - checkout
      - check-changed-files-or-halt
      - run: 'sh ./scripts/installgo_linux.sh'
      - run: 'make deps'
      - run:
          name: "Run integration tests"
          command: make test-integration
          environment:
            # Quoted: an unquoted `yes` is a YAML 1.1 boolean and may be
            # serialized as "true" rather than the literal string "yes".
            AZURE_EVENT_HUBS_EMULATOR_ACCEPT_EULA: "yes"
  test-go-mac:
    executor: mac
    steps:
      - checkout
      - check-changed-files-or-halt
      - run: 'sh ./scripts/installgo_mac.sh'
      - test-go:
          os: darwin
          arch: arm64
  test-go-windows:
    executor:
      name: win/default
      shell: bash.exe
      size: large
    steps:
      - checkout
      - check-changed-files-or-halt
      - run: git config --system core.longpaths true
      - run: choco feature enable -n allowGlobalConfirmation
      - run: 'sh ./scripts/installgo_windows.sh'
      - run: choco install mingw
      - run: echo 'export PATH="$PATH:/c/ProgramData/mingw64/mingw64/bin"' >> $BASH_ENV
      - test-go:
          os: windows
          gotestsum: "gotestsum.exe"
  test-licenses:
    executor: telegraf-ci
    steps:
      - checkout
      - check-changed-files-or-halt
      - run: 'make build_tools'
      - run: './tools/license_checker/license_checker -whitelist ./tools/license_checker/data/whitelist'
  # One packaging job per target; all delegate to the package-build command.
  windows-package:
    parameters:
      nightly:
        type: boolean
        default: false
    executor: telegraf-ci
    steps:
      - package-build:
          type: windows
          nightly: << parameters.nightly >>
  darwin-amd64-package:
    parameters:
      nightly:
        type: boolean
        default: false
    executor: telegraf-ci
    steps:
      - package-build:
          type: darwin-amd64
          nightly: << parameters.nightly >>
  darwin-arm64-package:
    parameters:
      nightly:
        type: boolean
        default: false
    executor: telegraf-ci
    steps:
      - package-build:
          type: darwin-arm64
          nightly: << parameters.nightly >>
  i386-package:
    parameters:
      nightly:
        type: boolean
        default: false
    executor: telegraf-ci
    steps:
      - package-build:
          type: i386
          nightly: << parameters.nightly >>
  ppc64le-package:
    parameters:
      nightly:
        type: boolean
        default: false
    executor: telegraf-ci
    steps:
      - package-build:
          type: ppc64le
          nightly: << parameters.nightly >>
  riscv64-package:
    parameters:
      nightly:
        type: boolean
        default: false
    executor: telegraf-ci
    steps:
      - package-build:
          type: riscv64
          nightly: << parameters.nightly >>
  loong64-package:
    parameters:
      nightly:
        type: boolean
        default: false
    executor: telegraf-ci
    steps:
      - package-build:
          type: loong64
          nightly: << parameters.nightly >>
  s390x-package:
    parameters:
      nightly:
        type: boolean
        default: false
    executor: telegraf-ci
    steps:
      - package-build:
          type: s390x
          nightly: << parameters.nightly >>
  armel-package:
    parameters:
      nightly:
        type: boolean
        default: false
    executor: telegraf-ci
    steps:
      - package-build:
          type: armel
          nightly: << parameters.nightly >>
  amd64-package:
    parameters:
      nightly:
        type: boolean
        default: false
    executor: telegraf-ci
    steps:
      - package-build:
          type: amd64
          nightly: << parameters.nightly >>
  arm64-package:
    parameters:
      nightly:
        type: boolean
        default: false
    executor: telegraf-ci
    steps:
      - package-build:
          type: arm64
          nightly: << parameters.nightly >>
  mipsel-package:
    parameters:
      nightly:
        type: boolean
        default: false
    executor: telegraf-ci
    steps:
      - package-build:
          type: mipsel
          nightly: << parameters.nightly >>
  mips-package:
    parameters:
      nightly:
        type: boolean
        default: false
    executor: telegraf-ci
    steps:
      - package-build:
          type: mips
          nightly: << parameters.nightly >>
  armhf-package:
    parameters:
      nightly:
        type: boolean
        default: false
    executor: telegraf-ci
    steps:
      - package-build:
          type: armhf
          nightly: << parameters.nightly >>
  # Uploads nightly artifacts to the public S3 bucket.
  nightly:
    executor: telegraf-ci
    steps:
      - attach_workspace:
          at: '/build'
      - run:
          command: |
            aws s3 sync /build/dist s3://dl.influxdata.com/telegraf/nightlies/ \
              --exclude "*" \
              --include "*.tar.gz" \
              --include "*.deb" \
              --include "*.rpm" \
              --include "*.zip" \
              --acl public-read
  # Uploads release artifacts (signed packages + digests) to S3.
  release:
    executor: telegraf-ci
    steps:
      - attach_workspace:
          at: '/build'
      - run:
          command: |
            aws s3 sync /build/dist s3://dl.influxdata.com/telegraf/releases/ \
              --exclude "*" \
              --include "telegraf*.DIGESTS" \
              --include "telegraf*.digests" \
              --include "telegraf*.asc" \
              --include "telegraf*.deb" \
              --include "telegraf*.dmg" \
              --include "telegraf*.rpm" \
              --include "telegraf*.tar.gz" \
              --include "telegraf*.zip" \
              --acl public-read
  docker-nightly:
    machine:
      image: ubuntu-2204:current
    steps:
      - run:
          name: login to quay.io
          command: docker login --username="${QUAY_USER}" --password="${QUAY_PASS}" quay.io
      - run:
          name: clone influxdata/influxdata-docker
          command: git clone https://github.com/influxdata/influxdata-docker
      - run:
          name: build and push telegraf:nightly
          command: |
            cd influxdata-docker/telegraf/nightly
            docker build -t telegraf .
            docker tag telegraf quay.io/influxdb/telegraf-nightly:latest
            docker image ls
            docker push quay.io/influxdb/telegraf-nightly:latest
      - run:
          name: build and push telegraf:nightly-alpine
          command: |
            cd influxdata-docker/telegraf/nightly/alpine
            docker build -t telegraf-alpine .
            docker tag telegraf-alpine quay.io/influxdb/telegraf-nightly:alpine
            docker image ls
            docker push quay.io/influxdb/telegraf-nightly:alpine
  # Installs the built deb/rpm inside an Incus container as a smoke test.
  amd64-package-test-nightly:
    machine:
      image: ubuntu-2204:current
    steps:
      - checkout
      - attach_workspace:
          at: '.'
      - run: sh ./scripts/installgo_linux.sh
      - run: ./scripts/install_incus.sh
      - run: cd tools/package_incus_test && go build
      - run: sudo ./tools/package_incus_test/package_incus_test --package $(find ./dist -name "*_amd64.deb")
      - run: sudo ./tools/package_incus_test/package_incus_test --package $(find ./dist -name "*.x86_64.rpm")
  package-sign-windows:
    machine:
      image: ubuntu-2204:current
    resource_class: medium
    steps:
      - checkout
      - check-changed-files-or-halt
      - attach_workspace:
          at: '.'
      - run:
          name: "Sign Windows Executables"
          command: ./scripts/sign-windows.sh
      - persist_to_workspace:
          root: '.'
          paths:
            - 'dist'
  package-sign-mac:
    executor: mac
    working_directory: /Users/distiller/project
    environment:
      FL_OUTPUT_DIR: output
      FASTLANE_LANE: test
    shell: /bin/bash --login -o pipefail
    steps:
      - checkout
      - check-changed-files-or-halt
      - attach_workspace:
          at: '.'
      - run:
          command: |
            sh ./scripts/mac-signing.sh
      - persist_to_workspace:
          root: './build'
          paths:
            - 'dist'
  # Gathers every final artifact in one job so they can be fetched together.
  package-consolidate:
    docker:
      - image: alpine
    steps:
      - attach_workspace:
          at: '.'
      - run:
          command: |
            cd dist && find . -type f -name '._*' -delete
      - store_artifacts:
          path: './dist'
          destination: 'build/dist'
      - run:
          command: |
            echo "This job contains all the final artifacts."
  # Notifies an external service so PR artifacts can be shared.
  share-artifacts:
    executor: aws-cli/default
    steps:
      - checkout
      - check-changed-files-or-halt
      - run:
          command: |
            PR=${CIRCLE_PULL_REQUEST##*/}
            printf -v payload '{ "pullRequestNumber": "%s" }' "$PR"
            curl -X POST "https://182c7jdgog.execute-api.us-east-1.amazonaws.com/prod/shareArtifacts" --data "$payload"
  # Signs artifacts with rsign and produces *.DIGESTS checksum files.
  package-sign:
    circleci_ip_ranges: true
    docker:
      - image: quay.io/influxdb/rsign:latest
        auth:
          username: $QUAY_RSIGN_USERNAME
          password: $QUAY_RSIGN_PASSWORD
    steps:
      - add_ssh_keys:
          fingerprints:
            - 3b:c0:fe:a0:8a:93:33:69:de:22:ac:20:a6:ed:6b:e5
      - attach_workspace:
          at: .
      - run: |
          cd dist

          # Generate the *.DIGESTS files. This must be done before the signing
          # step so that the *.DIGEST files are also signed.
          for target in *
          do
            sha256sum "${target}" > "${target}.DIGESTS"
          done

          for target in *
          do
            case "${target}"
            in
              # rsign is shipped on Alpine Linux which uses "busybox ash" instead
              # of bash. ash is somewhat more posix compliant and is missing some
              # extensions and niceties from bash.
              *.deb|*.dmg|*.rpm|*.tar.gz|*.zip|*.DIGESTS)
                rsign "${target}"
                ;;
            esac
          done

          for target in *
          do
            case "${target}"
            in
              *.deb|*.dmg|*.rpm|*.tar.gz|*.zip)
                # Print sha256 hash and target for artifacts all in one file
                # for use later during the release.
                cat "${target}.DIGESTS" >> "telegraf-${CIRCLE_TAG}.DIGESTS"
                ;;
            esac
          done
      - persist_to_workspace:
          root: ./
          paths:
            - dist
      - store_artifacts:
          path: ./dist

workflows:
  version: 2
  # PR/branch/tag pipeline: lint + test + package + sign + release.
  check:
    when:
      not:
        equal: [ scheduled_pipeline, << pipeline.trigger_source >> ]
    jobs:
      - 'lint-linux':
          filters:
            tags:
              only: /.*/
      - 'lint-macos':
          filters:
            tags:
              only: /.*/
      - 'lint-windows':
          filters:
            tags:
              only: /.*/
      - 'test-go-linux':
          filters:
            tags:
              only: /.*/
      - 'test-go-linux-386':
          filters:
            tags:
              only: /.*/
      - 'test-go-mac':
          filters:
            tags: # only runs on tags if you specify this filter
              only: /.*/
      - 'test-go-windows':
          filters:
            tags:
              only: /.*/
      - 'test-integration':
          filters:
            tags:
              only: /.*/
      - 'windows-package':
          requires:
            - 'test-go-linux'
          filters:
            tags:
              only: /.*/
      - 'darwin-amd64-package':
          requires:
            - 'test-go-mac'
          filters:
            tags:
              only: /.*/
      - 'darwin-arm64-package':
          requires:
            - 'test-go-mac'
          filters:
            branches:
              ignore:
                - master
            tags:
              only: /.*/
      - 'i386-package':
          requires:
            - 'test-go-linux-386'
          filters:
            branches:
              ignore:
                - master
            tags:
              only: /.*/
      - 'ppc64le-package':
          requires:
            - 'test-go-linux'
          filters:
            branches:
              ignore:
                - master
            tags:
              only: /.*/
      - 'riscv64-package':
          requires:
            - 'test-go-linux'
          filters:
            branches:
              ignore:
                - master
            tags:
              only: /.*/
      - 'loong64-package':
          requires:
            - 'test-go-linux'
          filters:
            branches:
              ignore:
                # NOTE(review): ignores ALL branches (tag-only), unlike sibling
                # jobs that ignore only master — confirm this is intentional.
                - /.*/
            tags:
              only: /.*/
      - 's390x-package':
          requires:
            - 'test-go-linux'
          filters:
            branches:
              ignore:
                - master
            tags:
              only: /.*/
      - 'armel-package':
          requires:
            - 'test-go-linux'
          filters:
            branches:
              ignore:
                - master
            tags:
              only: /.*/
      - 'amd64-package':
          requires:
            - 'test-go-linux'
          filters:
            tags:
              only: /.*/
      - 'arm64-package':
          requires:
            - 'test-go-linux'
          filters:
            branches:
              ignore:
                - master
            tags:
              only: /.*/
      - 'armhf-package':
          requires:
            - 'test-go-linux'
          filters:
            branches:
              ignore:
                - master
            tags:
              only: /.*/
      - 'mipsel-package':
          requires:
            - 'test-go-linux'
          filters:
            branches:
              ignore:
                - master
            tags:
              only: /.*/
      - 'mips-package':
          requires:
            - 'test-go-linux'
          filters:
            branches:
              ignore:
                - master
            tags:
              only: /.*/
      - 'share-artifacts':
          requires:
            - 'i386-package'
            - 'ppc64le-package'
            - 'riscv64-package'
            - 's390x-package'
            - 'armel-package'
            - 'amd64-package'
            - 'mipsel-package'
            - 'mips-package'
            - 'loong64-package'
            - 'darwin-amd64-package'
            - 'darwin-arm64-package'
            - 'windows-package'
            - 'arm64-package'
            - 'armhf-package'
          filters:
            branches:
              ignore:
                - master
                - release.*
            tags:
              ignore: /.*/
      - 'package-sign-windows':
          requires:
            - 'windows-package'
          filters:
            tags:
              only: /.*/
            branches:
              ignore: /.*/
      - 'package-sign-mac':
          requires:
            - 'darwin-amd64-package'
            - 'darwin-arm64-package'
          filters:
            tags:
              only: /.*/
            branches:
              ignore: /.*/
      - 'package-sign':
          requires:
            - 'i386-package'
            - 'ppc64le-package'
            - 'riscv64-package'
            - 's390x-package'
            - 'armel-package'
            - 'amd64-package'
            - 'mipsel-package'
            - 'mips-package'
            - 'loong64-package'
            - 'arm64-package'
            - 'armhf-package'
            - 'package-sign-mac'
            - 'package-sign-windows'
          filters:
            tags:
              only: /.*/
            branches:
              ignore: /.*/
      - 'package-consolidate':
          requires:
            - 'i386-package'
            - 'ppc64le-package'
            - 's390x-package'
            - 'armel-package'
            - 'amd64-package'
            - 'mipsel-package'
            - 'mips-package'
            - 'arm64-package'
            - 'armhf-package'
            - 'riscv64-package'
            - 'loong64-package'
            - 'package-sign-mac'
            - 'package-sign-windows'
            - 'package-sign'
          filters:
            tags:
              only: /.*/
            branches:
              ignore: /.*/
      - 'release':
          requires:
            - 'package-consolidate'
          filters:
            tags:
              only: /.*/
            branches:
              ignore: /.*/
  # Scheduled pipeline: full build + nightly publishing.
  nightly:
    when:
      equal: [ scheduled_pipeline, << pipeline.trigger_source >> ]
    jobs:
      - 'lint-linux'
      - 'lint-macos'
      - 'lint-windows'
      - 'test-go-linux'
      - 'test-go-linux-386'
      - 'test-go-mac'
      - 'test-go-windows'
      - 'test-licenses'
      - 'windows-package':
          name: 'windows-package-nightly'
          nightly: true
          requires:
            - 'test-go-windows'
      - 'darwin-amd64-package':
          name: 'darwin-amd64-package-nightly'
          nightly: true
          requires:
            - 'test-go-mac'
      - 'darwin-arm64-package':
          name: 'darwin-arm64-package-nightly'
          nightly: true
          requires:
            - 'test-go-mac'
      - 'i386-package':
          name: 'i386-package-nightly'
          nightly: true
          requires:
            - 'test-go-linux-386'
      - 'ppc64le-package':
          name: 'ppc64le-package-nightly'
          nightly: true
          requires:
            - 'test-go-linux'
      - 'riscv64-package':
          name: 'riscv64-package-nightly'
          nightly: true
          requires:
            - 'test-go-linux'
      - 'loong64-package':
          name: 'loong64-package-nightly'
          nightly: true
          requires:
            - 'test-go-linux'
      - 's390x-package':
          name: 's390x-package-nightly'
          nightly: true
          requires:
            - 'test-go-linux'
      - 'armel-package':
          name: 'armel-package-nightly'
          nightly: true
          requires:
            - 'test-go-linux'
      - 'amd64-package':
          name: 'amd64-package-nightly'
          nightly: true
          requires:
            - 'test-go-linux'
      - 'arm64-package':
          name: 'arm64-package-nightly'
          nightly: true
          requires:
            - 'test-go-linux'
      - 'armhf-package':
          name: 'armhf-package-nightly'
          nightly: true
          requires:
            - 'test-go-linux'
      - 'mipsel-package':
          name: 'mipsel-package-nightly'
          nightly: true
          requires:
            - 'test-go-linux'
      - 'mips-package':
          name: 'mips-package-nightly'
          nightly: true
          requires:
            - 'test-go-linux'
      - 'package-sign-windows':
          requires:
            - 'windows-package-nightly'
      - 'package-sign-mac':
          requires:
            - 'darwin-amd64-package-nightly'
            - 'darwin-arm64-package-nightly'
      - nightly:
          requires:
            - 'amd64-package-test-nightly'
            - 'arm64-package-nightly'
            - 'armel-package-nightly'
            - 'armhf-package-nightly'
            - 'darwin-amd64-package-nightly'
            - 'darwin-arm64-package-nightly'
            - 'i386-package-nightly'
            - 'mips-package-nightly'
            - 'mipsel-package-nightly'
            - 'loong64-package-nightly'
            - 'ppc64le-package-nightly'
            - 'riscv64-package-nightly'
            - 's390x-package-nightly'
            - 'windows-package-nightly'
      - docker-nightly:
          requires:
            - 'nightly'
      - amd64-package-test-nightly:
          requires:
            - 'amd64-package-nightly'

8
.gitattributes vendored Normal file
View file

@ -0,0 +1,8 @@
# merge=union takes both sides on conflict — appropriate for append-only or
# generated files where line order does not matter.
CHANGELOG.md merge=union
README.md merge=union
go.sum merge=union
plugins/inputs/all/all.go merge=union
plugins/outputs/all/all.go merge=union
# Always check-out / check-in files with LF line endings.
* text=auto eol=lf

74
.github/ISSUE_TEMPLATE/BUG_REPORT.yml vendored Normal file
View file

@ -0,0 +1,74 @@
# GitHub issue form for bug reports.
# NOTE(review): indentation reconstructed per GitHub issue-forms schema;
# the source capture had stripped all leading whitespace.
name: Bug Report
description: Create a bug report to help us improve
labels: ["bug"]
body:
  - type: markdown
    attributes:
      value: |
        Thanks for taking time to fill out this bug report! We reserve Telegraf issues for bugs for reproducible problems.
        Please redirect any questions about Telegraf usage to our [Community Slack](https://influxdata.com/slack) or [Community Page](https://community.influxdata.com/) we have a lot of talented community members there who could help answer your question more quickly.
  - type: textarea
    id: config
    attributes:
      label: Relevant telegraf.conf
      description: Place config in the toml code section. This will be automatically formatted into toml, so no need for backticks.
      render: toml
    validations:
      required: true
  - type: textarea
    id: logs
    attributes:
      label: Logs from Telegraf
      description: Please include the Telegraf logs, ideally with `--debug` used.
      render: text
    validations:
      required: true
  - type: input
    id: system-info
    attributes:
      label: System info
      description: Include Telegraf version, operating system, and other relevant details
      placeholder: ex. Telegraf 1.20.0, Ubuntu 20.04, Docker 20.10.8
    validations:
      required: true
  - type: textarea
    id: docker
    attributes:
      label: Docker
      description: If your bug involves third party dependencies or services, it can be very helpful to provide a Dockerfile or docker-compose.yml that reproduces the environment you're testing against.
    validations:
      required: false
  - type: textarea
    id: reproduce
    attributes:
      label: Steps to reproduce
      description: Describe the steps to reproduce the bug.
      value: |
        1.
        2.
        3.
        ...
    validations:
      required: true
  - type: textarea
    id: expected-behavior
    attributes:
      label: Expected behavior
      description: Describe what you expected to happen when you performed the above steps.
    validations:
      required: true
  - type: textarea
    id: actual-behavior
    attributes:
      label: Actual behavior
      description: Describe what actually happened when you performed the above steps.
    validations:
      required: true
  - type: textarea
    id: additional-info
    attributes:
      label: Additional info
      description: Include gist of relevant config, logs, etc.
    validations:
      required: false

View file

@ -0,0 +1,37 @@
# GitHub issue form for feature requests.
# NOTE(review): indentation reconstructed per GitHub issue-forms schema;
# the source capture had stripped all leading whitespace.
name: Feature request
description: Create a feature request to make Telegraf more awesome
labels: ["feature request"]
body:
  - type: markdown
    attributes:
      value: |
        Thanks for taking time to share with us this feature request! Please describe why you would like this feature to be added to Telegraf and how you plan to use it to make your life better.
  - type: textarea
    id: use-case
    attributes:
      label: Use Case
      description: Describe how you plan to use this feature.
    validations:
      required: true
  - type: textarea
    id: expected-behavior
    attributes:
      label: Expected behavior
      description: Describe what you expected to happen when you performed the above steps.
    validations:
      required: true
  - type: textarea
    id: actual-behavior
    attributes:
      label: Actual behavior
      description: Describe what actually happened when you performed the above steps.
    validations:
      required: true
  - type: textarea
    id: additional-info
    attributes:
      label: Additional info
      description: Include gist of relevant config, logs, etc.
    validations:
      required: false

22
.github/ISSUE_TEMPLATE/SUPPORT.yml vendored Normal file
View file

@ -0,0 +1,22 @@
# GitHub issue form that redirects support questions to Slack/forums.
# NOTE(review): indentation and in-scalar blank lines reconstructed; the
# source capture had stripped all leading whitespace.
name: Support request
description: Open a support request
labels: ["support"]
body:
  - type: markdown
    attributes:
      value: |
        WOAH, hold up. This isn't the best place for support questions.
        You can get a faster response on slack or forums:

        Please redirect any QUESTIONS about Telegraf usage to
        - InfluxData Slack Channel: https://www.influxdata.com/slack
        - InfluxData Community Site: https://community.influxdata.com

        Check the documentation for the related plugin including the troubleshooting
        section if available.
        https://docs.influxdata.com/telegraf
        https://github.com/influxdata/telegraf/tree/master/docs
  - type: textarea
    attributes:
      label: "Please direct all support questions to Slack or the forums. Thank you."

18
.github/PULL_REQUEST_TEMPLATE.md vendored Normal file
View file

@ -0,0 +1,18 @@
## Summary

<!-- Mandatory
Explain here the why, the rationale and motivation, for the changes.
-->

## Checklist

<!-- Mandatory
Please confirm the following by replacing the space with an "x" between the []:
-->

- [ ] No AI generated code was used in this PR

## Related issues

<!-- Mandatory
All PRs should resolve an issue, if one does not exist, please open one.
-->

resolves #

20
.github/dependabot.yml vendored Normal file
View file

@ -0,0 +1,20 @@
# Dependabot configuration: weekly update PRs for GitHub Actions and Go modules.
version: 2
updates:
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "weekly"
    open-pull-requests-limit: 25
    labels:
      - "dependencies"
  - package-ecosystem: "gomod"
    directory: "/"
    schedule:
      interval: "weekly"
    open-pull-requests-limit: 25
    ignore:
      # Dependabot isn't able to update packages whose module path does not
      # match the source, so skip anything with a version suffix.
      - dependency-name: "*.v*"
    labels:
      - "dependencies"

65
.github/workflows/linter.yml vendored Normal file
View file

@ -0,0 +1,65 @@
---
#################################
#################################
## Super Linter GitHub Actions ##
#################################
#################################
name: Lint Code Base

#
# Documentation:
# https://help.github.com/en/articles/workflow-syntax-for-github-actions
#

#############################
# Start the job on all push #
#############################
on:
  push:
    branches-ignore: [master, main]
    # Remove the line above to run when pushing to master
  pull_request:
    branches: [master, main]

###############
# Set the Job #
###############
permissions: {}

jobs:
  build:
    # Name the Job
    permissions:
      contents: read # to fetch code (actions/checkout)
      statuses: write # to mark status of each linter run (github/super-linter)
    name: Lint Code Base
    # Set the agent to run on
    runs-on: ubuntu-latest

    ##################
    # Load all steps #
    ##################
    steps:
      ##########################
      # Checkout the code base #
      ##########################
      - name: Checkout Code
        uses: actions/checkout@v4
        with:
          # Full git history is needed to get a proper list of changed files within `super-linter`
          fetch-depth: 0

      ################################
      # Run Linter against code base #
      ################################
      - name: Lint Code Base
        uses: super-linter/super-linter@v7.3.0
        env:
          VALIDATE_ALL_CODEBASE: false
          DEFAULT_BRANCH: master
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          LINTER_RULES_PATH: '.'
          MARKDOWN_CONFIG_FILE: .markdownlint.yml
          VALIDATE_MARKDOWN: true
          VALIDATE_BASH: true

29
.github/workflows/milestones.yml vendored Normal file
View file

@ -0,0 +1,29 @@
# Assigns a milestone to merged PRs based on their labels.
name: Milestones
on:
  pull_request_target:
    types:
      - closed

permissions:
  issues: write
  pull-requests: write

jobs:
  milestone_job:
    # Only run for PRs that were actually merged, not just closed.
    if: github.event.pull_request.merged == true
    runs-on: ubuntu-latest
    name: Assign milestones to PRs
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Assign milestone to PR
        uses: srebhan/label-milestone-action@v1.0.1
        id: assign-milestone
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          bugfix-labels: 'fix,chore,docs,test'
          minor-labels: 'feat'
          major-labels: 'breaking change'
          fallback: 'minor'
      - name: Show milestone
        run: echo "Assigned milestone is ${{ steps.assign-milestone.outputs.milestone }}"

24
.github/workflows/pr-target-branch.yml vendored Normal file
View file

@ -0,0 +1,24 @@
# Fails any pull request that does not target the master branch.
name: Target Branch
on:
  pull_request:
    types:
      - opened
      - reopened
      - synchronize
      - edited

jobs:
  check-target-master:
    name: master
    runs-on: ubuntu-latest
    steps:
      - name: debug
        run: echo Target is ${{ github.event.pull_request.base.ref }}
      - name: success
        if: github.event.pull_request.base.ref == 'master'
        run: exit 0
      - name: error
        if: github.event.pull_request.base.ref != 'master'
        run: |
          echo "Pull-request is not based on master, please rebase"
          exit 1

26
.github/workflows/readme-linter.yml vendored Normal file
View file

@ -0,0 +1,26 @@
# Lints only the plugin README files changed by the pull request.
name: Lint plugin readmes
on:
  # push:
  #   branches-ignore: master
  pull_request:
    branches: # Names of target branches, not source branches
      - master

jobs:
  run-readme-linter:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/setup-go@v5
        with:
          go-version: '1.24.3'
      - uses: actions/checkout@v4
        with:
          # Full history needed so changed-files can diff against the base.
          fetch-depth: 0
      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
        with:
          base_sha: ${{ github.event.pull_request.base.sha }}
          files: ./plugins/**/README.md
      - name: Run readme linter on changed files
        if: steps.changed-files.outputs.any_changed == 'true'
        run: go run ./tools/readme_linter ${{ steps.changed-files.outputs.all_changed_files }}

15
.github/workflows/semantic.yml vendored Normal file
View file

@ -0,0 +1,15 @@
---
# Validates PR titles / commit messages against the semantic convention
# via the shared influxdata reusable workflow.
name: "Semantic PR and Commit Messages"

on:
  pull_request:
    types: [opened, reopened, synchronize, edited]
    branches:
      - master

jobs:
  semantic:
    uses: influxdata/validate-semantic-github-messages/.github/workflows/semantic.yml@main
    with:
      COMMITS_HISTORY: 0

23
.gitignore vendored Normal file
View file

@ -0,0 +1,23 @@
# IDE and build output
/.idea
/build
# Local config and built binaries
/etc/telegraf.conf
/telegraf
/telegraf.exe
/telegraf.gz
# Tool binaries built from ./tools
/tools/package_lxd_test/package_lxd_test
/tools/license_checker/license_checker*
/tools/readme_config_includer/generator
/tools/readme_config_includer/generator.exe
/tools/config_includer/generator
/tools/config_includer/generator.exe
/tools/readme_linter/readme_linter*
/tools/custom_builder/custom_builder*
/vendor
.DS_Store
process.yml
/.vscode
/*.toml
/*.conf
# Windows version-info build artifacts
resource.syso
versioninfo.json
.uuid

638
.golangci.yml Normal file
View file

@ -0,0 +1,638 @@
version: "2"
linters:
# Default set of linters.
# The value can be: `standard`, `all`, `none`, or `fast`.
# Default: standard
default: none
# Enable specific linter.
# https://golangci-lint.run/usage/linters/#enabled-by-default
enable:
- asasalint
- asciicheck
- bidichk
- bodyclose
- copyloopvar
- depguard
- dogsled
- errcheck
- errname
- errorlint
- gocheckcompilerdirectives
- gocritic
- goprintffuncname
- gosec
- govet
- ineffassign
- interfacebloat
- lll
- makezero
- mirror
- nakedret
- nilerr
- nolintlint
- perfsprint
- prealloc
- predeclared
- revive
- sqlclosecheck
- staticcheck
- testifylint
- tparallel
- unconvert
- unparam
- unused
- usetesting
settings:
depguard:
# Rules to apply.
#
# Variables:
# - File Variables
# Use an exclamation mark `!` to negate a variable.
# Example: `!$test` matches any file that is not a go test file.
#
# `$all` - matches all go files
# `$test` - matches all go test files
#
# - Package Variables
#
# `$gostd` - matches all of go's standard library (Pulled from `GOROOT`)
#
# Default (applies if no custom rules are defined): Only allow $gostd in all files.
rules:
# Name of a rule.
main:
# List of file globs that will match this list of settings to compare against.
# By default, if a path is relative, it is relative to the directory where the golangci-lint command is executed.
# The placeholder '${base-path}' is substituted with a path relative to the mode defined with `run.relative-path-mode`.
# The placeholder '${config-path}' is substituted with a path relative to the configuration file.
# Default: $all
files:
- '!**/agent/**'
- '!**/cmd/**'
- '!**/config/**'
- '!**/filter/**'
- '!**/internal/**'
- '!**/logger/**'
- '!**/metric/**'
- '!**/models/**'
- '!**/plugins/serializers/**'
- '!**/scripts/**'
- '!**/selfstat/**'
- '!**/testutil/**'
- '!**/tools/**'
- '!**/*_test.go'
# List of packages that are not allowed.
# Entries can be a variable (starting with $), a string prefix, or an exact match (if ending with $).
# Default: []
deny:
- pkg: log
desc: 'Use injected telegraf.Logger instead'
errcheck:
# report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`.
# Such cases aren't reported by default.
# Default: false
check-blank: true
# List of functions to exclude from checking, where each entry is a single function to exclude.
# See https://github.com/kisielk/errcheck#excluding-functions for details.
exclude-functions:
- '(*hash/maphash.Hash).Write'
- '(*hash/maphash.Hash).WriteByte'
- '(*hash/maphash.Hash).WriteString'
- '(*github.com/influxdata/telegraf/plugins/outputs/postgresql/sqltemplate.Template).UnmarshalText'
gocritic:
# Disable all checks.
# Default: false
disable-all: true
# Which checks should be enabled in addition to default checks; can't be combined with 'disabled-checks'.
# By default, list of stable checks is used (https://go-critic.com/overview#checks-overview).
# To see which checks are enabled run `GL_DEBUG=gocritic golangci-lint run --enable=gocritic`.
enabled-checks:
# diagnostic
- argOrder
- badCall
- badCond
- badLock
- badRegexp
- badSorting
- badSyncOnceFunc
- builtinShadowDecl
- caseOrder
- codegenComment
- commentedOutCode
- deferInLoop
- deprecatedComment
- dupArg
- dupBranchBody
- dupCase
- dupSubExpr
- dynamicFmtString
- emptyDecl
- evalOrder
- exitAfterDefer
- externalErrorReassign
- filepathJoin
- flagName
- mapKey
- nilValReturn
- offBy1
- regexpPattern
- sloppyLen
- sloppyReassign
- sloppyTypeAssert
- sortSlice
- sprintfQuotedString
- sqlQuery
- syncMapLoadAndDelete
- truncateCmp
- uncheckedInlineErr
- unnecessaryDefer
- weakCond
# performance
- appendCombine
- equalFold
- hugeParam
- indexAlloc
- preferDecodeRune
- preferFprint
- preferStringWriter
- preferWriteByte
- rangeExprCopy
- rangeValCopy
- sliceClear
- stringXbytes
# Settings passed to gocritic.
# The settings key is the name of a supported gocritic checker.
# The list of supported checkers can be found at https://go-critic.com/overview.
settings:
hugeParam:
# Size in bytes that makes the warning trigger.
# Default: 80
sizeThreshold: 512
rangeValCopy:
# Size in bytes that makes the warning trigger.
# Default: 128
sizeThreshold: 512
gosec:
# To select a subset of rules to run.
# Available rules: https://github.com/securego/gosec#available-rules
# Default: [] - means include all rules
includes:
- G101 # Look for hard coded credentials
- G102 # Bind to all interfaces
- G103 # Audit the use of unsafe block
- G106 # Audit the use of ssh.InsecureIgnoreHostKey
- G107 # Url provided to HTTP request as taint input
- G108 # Profiling endpoint automatically exposed on /debug/pprof
- G109 # Potential Integer overflow made by strconv.Atoi result conversion to int16/32
- G110 # Potential DoS vulnerability via decompression bomb
- G111 # Potential directory traversal
- G112 # Potential slowloris attack
- G114 # Use of net/http serve function that has no support for setting timeouts
- G201 # SQL query construction using format string
- G202 # SQL query construction using string concatenation
- G203 # Use of unescaped data in HTML templates
- G301 # Poor file permissions used when creating a directory
- G302 # Poor file permissions used with chmod
- G303 # Creating tempfile using a predictable path
- G305 # File traversal when extracting zip/tar archive
- G306 # Poor file permissions used when writing to a new file
- G401 # Detect the usage of MD5 or SHA1
- G403 # Ensure minimum RSA key length of 2048 bits
- G404 # Insecure random number source (rand)
- G405 # Detect the usage of DES or RC4
- G406 # Detect the usage of MD4 or RIPEMD160
- G501 # Import blocklist: crypto/md5
- G502 # Import blocklist: crypto/des
- G503 # Import blocklist: crypto/rc4
- G505 # Import blocklist: crypto/sha1
- G506 # Import blocklist: golang.org/x/crypto/md4
- G507 # Import blocklist: golang.org/x/crypto/ripemd160
- G601 # Implicit memory aliasing of items from a range statement
- G602 # Slice access out of bounds
# G104, G105, G113, G204, G304, G307, G402, G504 were not enabled intentionally
# TODO: review G115 when reporting false positives is fixed (https://github.com/securego/gosec/issues/1212)
# To specify the configuration of rules.
config:
# Maximum allowed permissions mode for os.OpenFile and os.Chmod
# Default: "0600"
G302: "0640"
# Maximum allowed permissions mode for os.WriteFile and ioutil.WriteFile
# Default: "0600"
G306: "0640"
govet:
# Settings per analyzer.
settings:
# Analyzer name, run `go tool vet help` to see all analyzers.
printf:
# Comma-separated list of print function names to check (in addition to default, see `go tool vet help printf`).
# Default: []
funcs:
- (github.com/influxdata/telegraf.Logger).Tracef
- (github.com/influxdata/telegraf.Logger).Debugf
- (github.com/influxdata/telegraf.Logger).Infof
- (github.com/influxdata/telegraf.Logger).Warnf
- (github.com/influxdata/telegraf.Logger).Errorf
- (github.com/influxdata/telegraf.Logger).Trace
- (github.com/influxdata/telegraf.Logger).Debug
- (github.com/influxdata/telegraf.Logger).Info
- (github.com/influxdata/telegraf.Logger).Warn
- (github.com/influxdata/telegraf.Logger).Error
lll:
# Max line length, lines longer will be reported.
# '\t' is counted as 1 character by default, and can be changed with the tab-width option.
# Default: 120.
line-length: 160
# Tab width in spaces.
# Default: 1
tab-width: 4
nakedret:
# Make an issue if func has more lines of code than this setting, and it has naked returns.
# Default: 30
max-func-lines: 1
nolintlint:
# Enable to require an explanation of nonzero length after each nolint directive.
# Default: false
require-explanation: true
# Enable to require nolint directives to mention the specific linter being suppressed.
# Default: false
require-specific: true
prealloc:
# Report pre-allocation suggestions only on simple loops that have no returns/breaks/continues/gotos in them.
# Default: true
simple: false
revive:
# Sets the default severity.
# See https://github.com/mgechev/revive#configuration
# Default: warning
severity: error
# Run `GL_DEBUG=revive golangci-lint run --enable-only=revive` to see default, all available rules, and enabled rules.
rules:
- name: argument-limit
arguments: [ 6 ]
- name: atomic
- name: bare-return
- name: blank-imports
- name: bool-literal-in-expr
- name: call-to-gc
- name: comment-spacings
- name: confusing-naming
- name: confusing-results
- name: constant-logical-expr
- name: context-as-argument
- name: context-keys-type
- name: datarace
- name: deep-exit
- name: defer
- name: dot-imports
- name: duplicated-imports
- name: early-return
- name: empty-block
- name: empty-lines
- name: enforce-map-style
exclude: [ "TEST" ]
arguments:
- "make"
- name: enforce-repeated-arg-type-style
arguments:
- "short"
- name: enforce-slice-style
arguments:
- "make"
- name: error-naming
- name: error-return
- name: error-strings
- name: errorf
- name: exported
exclude:
- "**/accumulator.go"
- "**/agent/**"
- "**/cmd/**"
- "**/config/**"
- "**/filter/**"
- "**/internal/**"
- "**/logger/**"
- "**/logger.go"
- "**/metric/**"
- "**/metric.go"
- "**/migrations/**"
- "**/models/**"
- "**/persister/**"
- "**/parser.go"
- "**/plugin.go"
- "**/plugins/aggregators/**"
- "**/plugins/common/**"
- "**/plugins/outputs/**"
- "**/plugins/parsers/**"
- "**/plugins/processors/**"
- "**/plugins/secretstores/**"
- "**/plugins/serializers/**"
- "**/selfstat/**"
- "**/serializer.go"
- "**/testutil/**"
- "**/tools/**"
arguments:
- "check-private-receivers"
- "say-repetitive-instead-of-stutters"
- "check-public-interface"
- "disable-checks-on-types"
- name: function-result-limit
arguments: [ 3 ]
- name: get-return
- name: identical-branches
- name: if-return
- name: import-alias-naming
arguments:
- "^[a-z][a-z0-9_]*[a-z0-9]+$"
- name: import-shadowing
- name: increment-decrement
- name: indent-error-flow
- name: max-public-structs
arguments: [ 5 ]
exclude: [ "TEST" ]
- name: modifies-parameter
- name: modifies-value-receiver
- name: optimize-operands-order
- name: package-comments
- name: range
- name: range-val-address
- name: range-val-in-closure
- name: receiver-naming
- name: redefines-builtin-id
- name: redundant-import-alias
- name: string-format
arguments:
- - 'fmt.Errorf[0],errors.New[0]'
- '/^([^A-Z]|$)/'
- 'Error string must not start with a capital letter.'
- - 'fmt.Errorf[0],errors.New[0]'
- '/(^|[^\.!?])$/'
- 'Error string must not end in punctuation.'
- - 'panic'
- '/^[^\n]*$/'
- 'Must not contain line breaks.'
- name: string-of-int
- name: struct-tag
- name: superfluous-else
- name: time-equal
- name: time-naming
- name: unconditional-recursion
- name: unexported-naming
- name: unnecessary-stmt
- name: unreachable-code
- name: unused-parameter
- name: unused-receiver
- name: var-declaration
- name: var-naming
arguments:
- [ ] # AllowList
- [ "ID", "DB", "TS" ] # DenyList
- name: waitgroup-by-value
staticcheck:
# SAxxxx checks in https://staticcheck.dev/docs/configuration/options/#checks
# Example (to disable some checks): [ "all", "-SA1000", "-SA1001"]
# Default: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022"]
checks:
- all
# Poorly chosen identifier.
# https://staticcheck.dev/docs/checks/#ST1003
- -ST1003
# The documentation of an exported function should start with the function's name.
# https://staticcheck.dev/docs/checks/#ST1020
- -ST1020
# The documentation of an exported type should start with type's name.
# https://staticcheck.dev/docs/checks/#ST1021
- -ST1021
# The documentation of an exported variable or constant should start with variable's name.
# https://staticcheck.dev/docs/checks/#ST1022
- -ST1022
# Apply De Morgan's law.
# https://staticcheck.dev/docs/checks/#QF1001
- -QF1001
# Convert if/else-if chain to tagged switch.
# https://staticcheck.dev/docs/checks/#QF1003
- -QF1003
# Use 'strings.ReplaceAll' instead of 'strings.Replace' with 'n == -1'.
# https://staticcheck.dev/docs/checks/#QF1004
- -QF1004
# Lift 'if'+'break' into loop condition.
# https://staticcheck.dev/docs/checks/#QF1006
- -QF1006
# Merge conditional assignment into variable declaration.
# https://staticcheck.dev/docs/checks/#QF1007
- -QF1007
# Omit embedded fields from selector expression.
# https://staticcheck.dev/docs/checks/#QF1008
- -QF1008
# Use 'time.Time.Equal' instead of '==' operator.
# https://staticcheck.dev/docs/checks/#QF1009
- -QF1009
testifylint:
# Disable all checkers (https://github.com/Antonboom/testifylint#checkers).
# Default: false
disable-all: true
# Enable checkers by name
enable:
- blank-import
- bool-compare
- compares
- contains
- empty
- encoded-compare
- error-is-as
- error-nil
- expected-actual
- float-compare
- formatter
- go-require
- len
- negative-positive
- nil-compare
- regexp
- require-error
- suite-broken-parallel
- suite-dont-use-pkg
- suite-extra-assert-call
- suite-subtest-run
- suite-thelper
- useless-assert
usetesting:
# Enable/disable `os.TempDir()` detections.
# Default: false
os-temp-dir: true
# Defines a set of rules to ignore issues.
# It does not skip the analysis, and so does not ignore "typecheck" errors.
exclusions:
# Mode of the generated files analysis.
#
# - `strict`: sources are excluded by strictly following the Go generated file convention.
# Source files that have lines matching only the following regular expression will be excluded: `^// Code generated .* DO NOT EDIT\.$`
# This line must appear before the first non-comment, non-blank text in the file.
# https://go.dev/s/generatedcode
# - `lax`: sources are excluded if they contain lines like `autogenerated file`, `code generated`, `do not edit`, etc.
# - `disable`: disable the generated files exclusion.
#
# Default: strict
generated: lax
# Excluding configuration per-path, per-linter, per-text and per-source.
rules:
# errcheck
- path: cmd/telegraf/(main|printer|cmd_plugins).go
text: "Error return value of `outputBuffer.Write` is not checked"
- path: plugins/inputs/win_perf_counters/pdh.go
linters:
- errcheck
# gosec:G101
- path: _test\.go
text: "Potential hardcoded credentials"
# gosec:G404
- path: _test\.go
text: "Use of weak random number generator"
# revive:max-public-structs
- path-except: ^plugins/(aggregators|inputs|outputs|parsers|processors|serializers)/...
text: "max-public-structs: you have exceeded the maximum number"
# revive:var-naming
- path: (.+)\.go$
text: don't use an underscore in package name
# revive:exported
- path: (.+)\.go$
text: exported method .*\.(Init|SampleConfig|Gather|Start|Stop|GetState|SetState|SetParser|SetParserFunc|SetTranslator|Probe) should have comment or be unexported
# EXC0001 errcheck: Almost all programs ignore errors on these functions, and in most cases it's ok
- path: (.+)\.go$
text: Error return value of .((os\.)?std(out|err)\..*|.*Close.*|.*close.*|.*Flush|.*Disconnect|.*disconnect|.*Clear|os\.Remove(All)?|.*print(f|ln)?|os\.Setenv|os\.Unsetenv). is not checked
# EXC0013 revive: Annoying issue about not having a comment. The rare codebase has such comments
- path: (.+)\.go$
text: package comment should be of the form "(.+)...
# EXC0015 revive: Annoying issue about not having a comment. The rare codebase has such comments
- path: (.+)\.go$
text: should have a package comment
# Which file paths to exclude: they will be analyzed, but issues from them won't be reported.
# "/" will be replaced by the current OS file path separator to properly work on Windows.
# Default: []
paths:
- plugins/parsers/influx/machine.go*
formatters:
# Enable specific formatter.
# Default: [] (uses standard Go formatting)
enable:
- gci
# Formatters settings.
settings:
gci:
# Section configuration to compare against.
# Section names are case-insensitive and may contain parameters in ().
# The default order of sections is `standard > default > custom > blank > dot > alias > localmodule`.
# If `custom-order` is `true`, it follows the order of `sections` option.
# Default: ["standard", "default"]
sections:
- standard # Standard section: captures all standard packages.
- default # Default section: contains all imports that could not be matched to another section type.
- localmodule # Local module section: contains all local packages. This section is not present unless explicitly enabled.
exclusions:
# Mode of the generated files analysis.
#
# - `strict`: sources are excluded by strictly following the Go generated file convention.
# Source files that have lines matching only the following regular expression will be excluded: `^// Code generated .* DO NOT EDIT\.$`
# This line must appear before the first non-comment, non-blank text in the file.
# https://go.dev/s/generatedcode
# - `lax`: sources are excluded if they contain lines like `autogenerated file`, `code generated`, `do not edit`, etc.
# - `disable`: disable the generated files exclusion.
#
# Default: lax
generated: lax
issues:
# Maximum issues count per one linter.
# Set to 0 to disable.
# Default: 50
max-issues-per-linter: 0
# Maximum count of issues with the same text.
# Set to 0 to disable.
# Default: 3
max-same-issues: 0
# Make issues output unique by line.
# Default: true
uniq-by-line: false
# Output configuration options.
output:
# The formats used to render issues.
formats:
# Prints issues in columns representation separated by tabulations.
tab:
# Output path can be either `stdout`, `stderr` or path to the file to write to.
# Default: stdout
path: stdout
# Order to use when sorting results.
# Possible values: `file`, `linter`, and `severity`.
#
# If the severity values are inside the following list, they are ordered in this order:
# 1. error
# 2. warning
# 3. high
# 4. medium
# 5. low
# Either they are sorted alphabetically.
#
# Default: ["linter", "file"]
sort-order:
- file # filepath, line, and column.
- linter
# Show statistics per linter.
# Default: true
show-stats: true
severity:
# Set the default severity for issues.
#
# If severity rules are defined and the issues do not match or no severity is provided to the rule
# this will be the default severity applied.
# Severities should match the supported severity names of the selected out format.
# - Code climate: https://docs.codeclimate.com/docs/issues#issue-severity
# - Checkstyle: https://checkstyle.sourceforge.io/property_types.html#SeverityLevel
# - GitHub: https://help.github.com/en/actions/reference/workflow-commands-for-github-actions#setting-an-error-message
# - TeamCity: https://www.jetbrains.com/help/teamcity/service-messages.html#Inspection+Instance
#
# `@linter` can be used as severity value to keep the severity from linters (e.g. revive, gosec, ...)
#
# Default: ""
default: error

6
.markdownlint.yml Normal file
View file

@ -0,0 +1,6 @@
{
"MD013": false,
"MD033": {
"allowed_elements": ["br"]
}
}

2
.markdownlintignore Normal file
View file

@ -0,0 +1,2 @@
.github/PULL_REQUEST_TEMPLATE.md
docs/includes/*

2795
CHANGELOG-1.13.md Normal file

File diff suppressed because it is too large Load diff

4258
CHANGELOG.md Normal file

File diff suppressed because it is too large Load diff

77
CODE_OF_CONDUCT.md Normal file
View file

@ -0,0 +1,77 @@
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, caste, color, religion, or sexual
identity and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the overall
community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or advances of
any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email address,
without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
`community@influxdata.com`.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
[version 2.1][v2.1].
[homepage]: https://www.contributor-covenant.org
[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html

129
CONTRIBUTING.md Normal file
View file

@ -0,0 +1,129 @@
# Contributing to Telegraf
There are many ways to get involved in the Telegraf project! From opening issues, creating pull requests, to joining the conversation in Slack. We would love to see you contribute your expertise and join our community. To get started review this document to learn best practices.
![tiger](assets/GopherAndTiger.png "tiger")
## Opening Issues
### Bug reports
Before you file an issue, please search existing issues in case it has already been filed, or perhaps even fixed. If you file an issue, please ensure you include all the requested details (e.g. Telegraf config and logs, platform, etc.)
Please note that issues are not the place to file general support requests such as "How do I use the mongoDB plugin?" Questions of this nature should be sent to the [Community Slack](https://influxdata.com/slack) or [Community Page](https://community.influxdata.com/), not filed as issues.
### Feature requests
We really like to receive feature requests as it helps us prioritize our work. Before you file a feature request, please search existing issues; you can filter issues that have the label `feature request`. Please be clear about your requirements and goals, and help us to understand what you would like to see added to Telegraf with examples and the reasons why it is important to you. If you find your feature request already exists as a GitHub issue, please indicate your support for that feature by using the "thumbs up" reaction.
### Support questions
We recommend posting support questions in our [Community Slack](https://influxdata.com/slack) or [Community Page](https://community.influxdata.com/), we have a lot of talented community members there who could help answer your question more quickly.
## Contributing code
### AI Generated Code
We currently cannot accept AI generated code contributions. Code contributed
should be your own per the CLA.
### Creating a pull request
1. [Sign the CLA][cla].
2. Open a [new issue][] to discuss the changes you would like to make. This is
not strictly required but it may help reduce the amount of rework you need
to do later.
3. Make changes or write plugin using the guidelines in the following
documents:
- [Input Plugins][inputs]
- [Processor Plugins][processors]
- [Aggregator Plugins][aggregators]
- [Output Plugins][outputs]
4. Ensure you have added proper unit tests and documentation.
5. Open a new [pull request][].
6. The pull request title needs to follow [conventional commit format](https://www.conventionalcommits.org/en/v1.0.0/#summary)
**Note:** If you have a pull request with only one commit, then that commit needs to follow the conventional commit format or the `Semantic Pull Request` check will fail. This is because GitHub uses the pull request title when there are multiple commits, but when there is only one commit it uses that commit's message instead.
### When will your contribution get released?
We have two kinds of releases: patch releases, which happen every few weeks, and feature releases, which happen once a quarter. If your fix is a bug fix, it will be released in the next patch release after it is merged to master. If your release is a new plugin or other feature, it will be released in the next quarterly release after it is merged to master. Quarterly releases are on the third Wednesday of March, June, September, and December.
### Contributing an External Plugin
Input, output, and processor plugins written for internal Telegraf can be run as externally-compiled plugins through the [Execd Input](/plugins/inputs/execd), [Execd Output](/plugins/outputs/execd), and [Execd Processor](/plugins/processors/execd) Plugins without having to change the plugin code.
Follow the guidelines of how to integrate your plugin with the [Execd Go Shim](/plugins/common/shim) to easily compile it as a separate app and run it with the respective `execd` plugin.
Check out our [guidelines](/docs/EXTERNAL_PLUGINS.md#external-plugin-guidelines) on how to build and set up your external plugins to run with `execd`.
## Security Vulnerability Reporting
InfluxData takes security and our users' trust very seriously. If you believe you have found a security issue in any of our
open source projects, please responsibly disclose it by contacting `security@influxdata.com`. More details about
security vulnerability reporting,
including our GPG key, [can be found here](https://www.influxdata.com/how-to-report-security-vulnerabilities/).
## Common development tasks
**Adding a dependency:**
Telegraf uses Go modules. Assuming you can already build the project, run this in the telegraf directory:
1. `go get github.com/[dependency]/[new-package]`
**Before opening a PR:**
Before opening a pull request you should run the following checks locally to make sure the CI will pass.
```shell
make lint
make check
make check-deps
make test
make docs
```
**Execute integration tests:**
(Optional)
To run only the integration tests use:
```shell
make test-integration
```
To run the full test suite use:
```shell
make test-all
```
### For more developer resources
- [Code Style][codestyle]
- [Deprecation][deprecation]
- [Logging][logging]
- [Metric Format Changes][metricformat]
- [Packaging][packaging]
- [Profiling][profiling]
- [Reviews][reviews]
- [Sample Config][sample config]
- [Code of Conduct][code of conduct]
[cla]: https://www.influxdata.com/legal/cla/
[new issue]: https://github.com/influxdata/telegraf/issues/new/choose
[pull request]: https://github.com/influxdata/telegraf/compare
[inputs]: /docs/INPUTS.md
[processors]: /docs/PROCESSORS.md
[aggregators]: /docs/AGGREGATORS.md
[outputs]: /docs/OUTPUTS.md
[codestyle]: /docs/developers/CODE_STYLE.md
[deprecation]: /docs/developers/DEPRECATION.md
[logging]: /docs/developers/LOGGING.md
[metricformat]: /docs/developers/METRIC_FORMAT_CHANGES.md
[packaging]: /docs/developers/PACKAGING.md
[profiling]: /docs/developers/PROFILING.md
[reviews]: /docs/developers/REVIEWS.md
[sample config]: /docs/developers/SAMPLE_CONFIG.md
[code of conduct]: /CODE_OF_CONDUCT.md

48
EXTERNAL_PLUGINS.md Normal file
View file

@ -0,0 +1,48 @@
# External Plugins
This is a list of plugins that can be compiled outside of Telegraf and used via the `execd` [input](plugins/inputs/execd), [output](plugins/outputs/execd), or [processor](plugins/processors/execd).
Check out the [external plugin documentation](/docs/EXTERNAL_PLUGINS.md) for more information on writing and contributing a plugin.
Pull requests welcome.
## Inputs
- [awsalarms](https://github.com/vipinvkmenon/awsalarms) - Simple plugin to gather/monitor alarms generated in AWS.
- [octoprint](https://github.com/BattleBas/octoprint-telegraf-plugin) - Gather 3d print information from the octoprint API.
- [opcda](https://github.com/lpc921/telegraf-execd-opcda) - Gather data from [OPC Foundation's Data Access (DA)](https://opcfoundation.org/about/opc-technologies/opc-classic/) protocol for industrial automation.
- [open-hardware-monitor](https://github.com/marianob85/open_hardware_monitor-telegraf-plugin) - Gather sensors data provided by [Open Hardware Monitor](http://openhardwaremonitor.org)
- [plex](https://github.com/russorat/telegraf-webhooks-plex) - Listens for events from Plex Media Server [Webhooks](https://support.plex.tv/articles/115002267687-webhooks/).
- [rand](https://github.com/ssoroka/rand) - Generate random numbers
- [SMCIPMITool](https://github.com/jhpope/smc_ipmi) - Python script to parse the output of [SMCIPMITool](https://www.supermicro.com/en/solutions/management-software/ipmi-utilities) into [InfluxDB line protocol](https://docs.influxdata.com/influxdb/latest/reference/syntax/line-protocol/).
- [systemd-timings](https://github.com/pdmorrow/telegraf-execd-systemd-timings) - Gather systemd boot and unit timestamp metrics.
- [twitter](https://github.com/inabagumi/twitter-telegraf-plugin) - Gather account information from Twitter accounts
- [youtube](https://github.com/inabagumi/youtube-telegraf-plugin) - Gather account information from YouTube channels
- [Big Blue Button](https://github.com/bigblueswarm/bigbluebutton-telegraf-plugin) - Gather meetings information from [Big Blue Button](https://bigbluebutton.org/) server
- [dnsmasq](https://github.com/machinly/dnsmasq-telegraf-plugin) - Gather dnsmasq statistics from dnsmasq
- [ldap_org and ds389](https://github.com/falon/CSI-telegraf-plugins) - Gather statistics from 389ds and from LDAP trees.
- [x509_crl](https://github.com/jcgonnard/telegraf-input-x590crl) - Gather information from your X509 CRL files
- [s7comm](https://github.com/nicolasme/s7comm) - Gather information from Siemens PLC
- [net_irtt](https://github.com/iAnatoly/telegraf-input-net_irtt) - Gather information from IRTT network test
- [dht_sensor](https://github.com/iAnatoly/telegraf-input-dht_sensor) - Gather temperature and humidity from DHTXX sensors
- [oracle](https://github.com/bonitoo-io/telegraf-input-oracle) - Gather the statistic data from Oracle RDBMS
- [db2](https://github.com/bonitoo-io/telegraf-input-db2) - Gather the statistic data from DB2 RDBMS
- [apt](https://github.com/x70b1/telegraf-apt) - Check Debian for package updates.
- [knot](https://github.com/x70b1/telegraf-knot) - Collect stats from Knot DNS.
- [fritzbox](https://github.com/hdecarne-github/fritzbox-telegraf-plugin) - Gather statistics from [FRITZ!Box](https://avm.de/produkte/fritzbox/) router and repeater
- [linux-psi-telegraf-plugin](https://github.com/gridscale/linux-psi-telegraf-plugin) - Gather pressure stall information ([PSI](https://facebookmicrosites.github.io/psi/)) from the Linux Kernel
- [hwinfo](https://github.com/zachstence/hwinfo-telegraf-plugin) - Gather Windows system hardware information from [HWiNFO](https://www.hwinfo.com/)
- [libvirt](https://gitlab.com/warrenio/tools/telegraf-input-libvirt) - Gather libvirt domain stats, based on a historical Telegraf implementation [libvirt](https://libvirt.org/)
- [bacnet](https://github.com/JurajMarcin/telegraf-bacnet) - Gather statistics from BACnet devices, with support for device discovery and Change of Value subscriptions
- [tado](https://github.com/zoeimogen/tado-telegraf-plugin) - Gather zone temperature settings and current temperature/humidity readings from Tado
- [homekit](https://github.com/hdecarne-github/homekit-telegraf-plugin) - Gather smart home statistics from [HomeKit](https://en.wikipedia.org/wiki/HomeKit) devices via Home Hub automation
## Outputs
- [kinesis](https://github.com/morfien101/telegraf-output-kinesis) - Aggregation and compression of metrics to send Amazon Kinesis.
- [firehose](https://github.com/muhlba91/telegraf-output-kinesis-data-firehose) - Sends metrics in batches to Amazon Kinesis Data Firehose.
- [playfab](https://github.com/dgkanatsios/telegraftoplayfab) - Sends metrics to [Azure PlayFab](https://learn.microsoft.com/en-us/gaming/playfab/).
## Processors
- [geoip](https://github.com/a-bali/telegraf-geoip) - Add GeoIP information to IP addresses.
- [metadata](https://github.com/lawdt/metadata) - Appends metadata gathered from Openstack to metrics.

21
LICENSE Normal file
View file

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2015-2025 InfluxData Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

502
Makefile Normal file
View file

@ -0,0 +1,502 @@
# Windows hosts need the .exe suffix on built binaries.
ifneq (,$(filter $(OS),Windows_NT Windows))
EXEEXT=.exe
endif
# Pick the command used to read build_version.txt ('type' under cmd.exe shells).
cat := $(if $(filter $(OS),sh.exe),type,cat)
next_version := $(shell $(cat) build_version.txt)
# Git metadata baked into the binary version and package version strings.
tag := $(shell git describe --exact-match --tags 2>/dev/null)
branch := $(shell git rev-parse --abbrev-ref HEAD)
commit := $(shell git rev-parse --short=8 HEAD)
# Version selection:
#   NIGHTLY set        -> "nightly" deb/rpm/tar versions
#   no exact tag       -> snapshot versions suffixed with the commit hash
#   tag contains "-rc" -> release-candidate versioning
#   exact release tag  -> final release versioning
ifdef NIGHTLY
	version := $(next_version)
	rpm_version := nightly
	rpm_iteration := 0
	deb_version := nightly
	deb_iteration := 0
	tar_version := nightly
else ifeq ($(tag),)
	version := $(next_version)
	rpm_version := $(version)~$(commit)-0
	rpm_iteration := 0
	deb_version := $(version)~$(commit)-0
	deb_iteration := 0
	tar_version := $(version)~$(commit)
else ifneq ($(findstring -rc,$(tag)),)
	version := $(word 1,$(subst -, ,$(tag)))
	version := $(version:v%=%)
	rc := $(word 2,$(subst -, ,$(tag)))
	rpm_version := $(version)-0.$(rc)
	rpm_iteration := 0.$(subst rc,,$(rc))
	deb_version := $(version)~$(rc)-1
	deb_iteration := 0
	tar_version := $(version)~$(rc)
else
	version := $(tag:v%=%)
	rpm_version := $(version)-1
	rpm_iteration := 1
	deb_version := $(version)-1
	deb_iteration := 1
	tar_version := $(version)
endif
MAKEFLAGS += --no-print-directory
GOOS ?= $(shell go env GOOS)
GOARCH ?= $(shell go env GOARCH)
# HOSTGO runs go with the cross-compile variables cleared, so helper tools
# are always built for the build host.
HOSTGO := env -u GOOS -u GOARCH -u GOARM -- go
INTERNAL_PKG=github.com/influxdata/telegraf/internal
LDFLAGS := $(LDFLAGS) -X $(INTERNAL_PKG).Commit=$(commit) -X $(INTERNAL_PKG).Branch=$(branch)
ifneq ($(tag),)
	LDFLAGS += -X $(INTERNAL_PKG).Version=$(version)
else
	LDFLAGS += -X $(INTERNAL_PKG).Version=$(version)-$(commit)
endif
# Go built-in race detector works only for 64 bits architectures.
ifneq ($(GOARCH), 386)
	# Resolve macOS issue with Xcode 15 when running in race detector mode
	# https://github.com/golang/go/issues/61229
	ifeq ($(GOOS), darwin)
		race_detector := -race -ldflags=-extldflags=-Wl,-ld_classic
	else
		race_detector := -race
	endif
endif
# GOFMT lists files gofmt would rewrite (generated ragel machine excluded).
GOFILES ?= $(shell git ls-files '*.go')
GOFMT ?= $(shell gofmt -l -s $(filter-out plugins/parsers/influx/machine.go, $(GOFILES)))
# Install layout (GNU-style, overridable).
prefix ?= /usr/local
bindir ?= $(prefix)/bin
sysconfdir ?= $(prefix)/etc
localstatedir ?= $(prefix)/var
pkgdir ?= build/dist
.PHONY: all
# Default target: fetch dependencies, refresh docs, build the binary.
all: deps docs telegraf
.PHONY: help
# Print the common targets and the resulting package name formats.
help:
	@echo 'Targets:'
	@echo ' all - download dependencies and compile telegraf binary'
	@echo ' config - generate the config from current repo state'
	@echo ' deps - download dependencies'
	@echo ' docs - embed sample-configurations into READMEs'
	@echo ' telegraf - compile telegraf binary'
	@echo ' test - run short unit tests'
	@echo ' fmt - format source files'
	@echo ' tidy - tidy go modules'
	@echo ' lint - run linter'
	@echo ' lint-branch - run linter on changes in current branch since master'
	@echo ' lint-install - install linter'
	@echo ' check-deps - check docs/LICENSE_OF_DEPENDENCIES.md'
	@echo ' clean - delete build artifacts'
	@echo ' package - build all supported packages, override include_packages to only build a subset'
	@echo ' e.g.: make package include_packages="amd64.deb"'
	@echo ''
	@echo 'Possible values for include_packages variable'
	@$(foreach package,$(include_packages),echo " $(package)";)
	@echo ''
	@echo 'Resulting package name format (where arch will be the arch of the package):'
	@echo ' telegraf_$(deb_version)_arch.deb'
	@echo ' telegraf-$(rpm_version).arch.rpm'
	@echo ' telegraf-$(tar_version)_arch.tar.gz'
	@echo ' telegraf-$(tar_version)_arch.zip'
.PHONY: deps
# Download go module dependencies (verbosely).
deps:
	go mod download -x
.PHONY: version
# Print the snapshot version string (version plus commit hash).
version:
	@echo $(version)-$(commit)
# Build the repo's helper tools with the *host* toolchain (HOSTGO) so they
# stay runnable even while cross-compiling telegraf itself.
build_tools:
	$(HOSTGO) build -o ./tools/custom_builder/custom_builder$(EXEEXT) ./tools/custom_builder
	$(HOSTGO) build -o ./tools/license_checker/license_checker$(EXEEXT) ./tools/license_checker
	$(HOSTGO) build -o ./tools/readme_config_includer/generator$(EXEEXT) ./tools/readme_config_includer/generator.go
	$(HOSTGO) build -o ./tools/config_includer/generator$(EXEEXT) ./tools/config_includer/generator.go
	$(HOSTGO) build -o ./tools/readme_linter/readme_linter$(EXEEXT) ./tools/readme_linter
# Re-embed sample configurations into the READMEs of one plugin category
# (inputs, outputs, processors, aggregators, secretstores).
embed_readme_%:
	go generate -run="tools/config_includer/generator" ./plugins/$*/...
	go generate -run="tools/readme_config_includer/generator" ./plugins/$*/...
.PHONY: config
# Regenerate etc/telegraf.conf from the current plugin set.
config:
	@echo "generating default config"
	go run ./cmd/telegraf config > etc/telegraf.conf
.PHONY: docs
docs: build_tools embed_readme_inputs embed_readme_outputs embed_readme_processors embed_readme_aggregators embed_readme_secretstores
.PHONY: build
# Static (CGO-free) build of the telegraf binary into the repo root.
build:
	CGO_ENABLED=0 go build -tags "$(BUILDTAGS)" -ldflags "$(LDFLAGS)" ./cmd/telegraf
.PHONY: telegraf
telegraf: build
# Used by dockerfile builds
.PHONY: go-install
go-install:
	go install -mod=mod -ldflags "-w -s $(LDFLAGS)" ./cmd/telegraf
.PHONY: test
# Short (unit) test suite; race detector enabled on 64-bit architectures.
test:
	go test -short $(race_detector) ./...
.PHONY: test-integration
# Only tests whose names match "Integration" (may need external services).
test-integration:
	go test -run Integration $(race_detector) ./...
.PHONY: fmt
# Rewrite all tracked Go files with gofmt (generated parser excluded).
fmt:
	@gofmt -s -w $(filter-out plugins/parsers/influx/machine.go, $(GOFILES))
.PHONY: fmtcheck
# Fail, listing the offending files, when gofmt would change anything.
fmtcheck:
	@if [ ! -z "$(GOFMT)" ]; then \
		echo "[ERROR] gofmt has found errors in the following files:" ; \
		echo "$(GOFMT)" ; \
		echo "" ;\
		echo "Run make fmt to fix them." ; \
		exit 1 ;\
	fi
.PHONY: vet
# go vet over all packages except the generated influx parser.
vet:
	@echo 'go vet $$(go list ./... | grep -v ./plugins/parsers/influx)'
	@go vet $$(go list ./... | grep -v ./plugins/parsers/influx) ; if [ $$? -ne 0 ]; then \
		echo ""; \
		echo "go vet has found suspicious constructs. Please remediate any reported errors"; \
		echo "to fix them before submitting code for review."; \
		exit 1; \
	fi
.PHONY: lint-install
# Install the pinned golangci-lint plus markdownlint (requires npm).
lint-install:
	@echo "Installing golangci-lint"
	go install github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.1.2
	@echo "Installing markdownlint"
	npm install -g markdownlint-cli
.PHONY: lint
# Run Go and markdown linters; both tools must be on PATH (see lint-install).
lint:
	@which golangci-lint >/dev/null 2>&1 || { \
		echo "golangci-lint not found, please run: make lint-install"; \
		exit 1; \
	}
	golangci-lint run
	@which markdownlint >/dev/null 2>&1 || { \
		echo "markdownlint not found, please run: make lint-install"; \
		exit 1; \
	}
	markdownlint .
.PHONY: lint-branch
# Go linter only (markdownlint skipped); intended for branch-local checks.
lint-branch:
	@which golangci-lint >/dev/null 2>&1 || { \
		echo "golangci-lint not found, please run: make lint-install"; \
		exit 1; \
	}
	golangci-lint run
.PHONY: tidy
# Verify modules and fail if go mod tidy leaves go.mod/go.sum dirty.
tidy:
	go mod verify
	go mod tidy
	@if ! git diff --quiet go.mod go.sum; then \
		echo "please run go mod tidy and check in changes, you might have to use the same version of Go as the CI"; \
		exit 1; \
	fi
.PHONY: check
check: fmtcheck vet
.PHONY: test-all
# Full (non-short) test suite, gated on formatting and vet checks.
test-all: fmtcheck vet
	go test $(race_detector) ./...
.PHONY: check-deps
# Verify docs/LICENSE_OF_DEPENDENCIES.md matches the module graph.
check-deps:
	./scripts/check-deps.sh
.PHONY: clean
# Remove the binary, generated config, build tree and compiled helper tools.
clean:
	rm -f telegraf
	rm -f telegraf.exe
	rm -f etc/telegraf.conf
	rm -rf build
	rm -rf cmd/telegraf/resource.syso
	rm -rf cmd/telegraf/versioninfo.json
	rm -rf tools/config_includer/generator
	rm -rf tools/config_includer/generator.exe
	rm -rf tools/custom_builder/custom_builder
	rm -rf tools/custom_builder/custom_builder.exe
	rm -rf tools/license_checker/license_checker
	rm -rf tools/license_checker/license_checker.exe
	rm -rf tools/package_incus_test/package_incus_test
	rm -rf tools/package_incus_test/package_incus_test.exe
	rm -rf tools/readme_config_includer/generator
	rm -rf tools/readme_config_includer/generator.exe
	rm -rf tools/readme_linter/readme_linter
	rm -rf tools/readme_linter/readme_linter.exe
.PHONY: docker-image
# Local docker image tagged with the current commit hash.
docker-image:
	docker build -f scripts/buster.docker -t "telegraf:$(commit)" .
# Regenerate the influx line-protocol parser from its ragel definition.
plugins/parsers/influx/machine.go: plugins/parsers/influx/machine.go.rl
	ragel -Z -G2 $^ -o $@
.PHONY: ci
# Build and publish the CI container image used by CircleCI (see .circleci).
ci:
	docker build -t quay.io/influxdb/telegraf-ci:1.24.3 - < scripts/ci.docker
	docker push quay.io/influxdb/telegraf-ci:1.24.3
.PHONY: install
# Copy the built binary plus config/logrotate/service files into DESTDIR
# using the GOOS-appropriate layout; invoked by the packaging targets.
install: $(buildbin)
	@mkdir -pv $(DESTDIR)$(bindir)
	@mkdir -pv $(DESTDIR)$(sysconfdir)
	@mkdir -pv $(DESTDIR)$(localstatedir)
	@if [ $(GOOS) != "windows" ]; then mkdir -pv $(DESTDIR)$(sysconfdir)/logrotate.d; fi
	@if [ $(GOOS) != "windows" ]; then mkdir -pv $(DESTDIR)$(localstatedir)/log/telegraf; fi
	@if [ $(GOOS) != "windows" ]; then mkdir -pv $(DESTDIR)$(sysconfdir)/telegraf/telegraf.d; fi
	@cp -fv $(buildbin) $(DESTDIR)$(bindir)
	@if [ $(GOOS) != "windows" ]; then cp -fv etc/telegraf.conf $(DESTDIR)$(sysconfdir)/telegraf/telegraf.conf$(conf_suffix); fi
	@if [ $(GOOS) != "windows" ]; then cp -fv etc/logrotate.d/telegraf $(DESTDIR)$(sysconfdir)/logrotate.d; fi
	@if [ $(GOOS) = "windows" ]; then cp -fv etc/telegraf.conf $(DESTDIR)/telegraf.conf; fi
	@if [ $(GOOS) = "linux" ]; then mkdir -pv $(DESTDIR)$(prefix)/lib/telegraf/scripts; fi
	@if [ $(GOOS) = "linux" ]; then cp -fv scripts/telegraf.service $(DESTDIR)$(prefix)/lib/telegraf/scripts; fi
	@if [ $(GOOS) = "linux" ]; then cp -fv scripts/init.sh $(DESTDIR)$(prefix)/lib/telegraf/scripts; fi
# Telegraf build per platform. This improves package performance by sharing
# the bin between deb/rpm/tar packages over building directly into the package
# directory.
.PHONY: $(buildbin)
$(buildbin):
	echo $(GOOS)
	@mkdir -pv $(dir $@)
	CGO_ENABLED=0 go build -o $(dir $@) -tags "$(BUILDTAGS)" -ldflags "$(LDFLAGS)" ./cmd/telegraf
# Define packages Telegraf supports, organized by architecture with a rule to echo the list to limit include_packages
# e.g. make package include_packages="$(make amd64)"
# Each architecture follows the same pattern: define the package list, then a
# phony target of the same name that echoes it. (Recipe expansion is deferred,
# but keeping define-before-target makes the file consistent and scannable.)
mips += linux_mips.tar.gz mips.deb
.PHONY: mips
mips:
	@ echo $(mips)
mipsel += mipsel.deb linux_mipsel.tar.gz
.PHONY: mipsel
mipsel:
	@ echo $(mipsel)
loong64 += linux_loong64.tar.gz loong64.deb loong64.rpm
.PHONY: loong64
loong64:
	@ echo $(loong64)
arm64 += linux_arm64.tar.gz arm64.deb aarch64.rpm
.PHONY: arm64
arm64:
	@ echo $(arm64)
amd64 += freebsd_amd64.tar.gz linux_amd64.tar.gz amd64.deb x86_64.rpm
.PHONY: amd64
amd64:
	@ echo $(amd64)
armel += linux_armel.tar.gz armel.rpm armel.deb
.PHONY: armel
armel:
	@ echo $(armel)
armhf += linux_armhf.tar.gz freebsd_armv7.tar.gz armhf.deb armv6hl.rpm
.PHONY: armhf
armhf:
	@ echo $(armhf)
riscv64 += linux_riscv64.tar.gz riscv64.rpm riscv64.deb
.PHONY: riscv64
riscv64:
	@ echo $(riscv64)
s390x += linux_s390x.tar.gz s390x.deb s390x.rpm
.PHONY: s390x
s390x:
	@ echo $(s390x)
ppc64le += linux_ppc64le.tar.gz ppc64le.rpm ppc64el.deb
.PHONY: ppc64le
ppc64le:
	@ echo $(ppc64le)
i386 += freebsd_i386.tar.gz i386.deb linux_i386.tar.gz i386.rpm
.PHONY: i386
i386:
	@ echo $(i386)
windows += windows_i386.zip windows_amd64.zip windows_arm64.zip
.PHONY: windows
windows:
	@ echo $(windows)
darwin-amd64 += darwin_amd64.tar.gz
.PHONY: darwin-amd64
darwin-amd64:
	@ echo $(darwin-amd64)
darwin-arm64 += darwin_arm64.tar.gz
.PHONY: darwin-arm64
darwin-arm64:
	@ echo $(darwin-arm64)
# Full package set; ':=' means every arch list above must be defined by here.
include_packages := $(mips) $(mipsel) $(arm64) $(amd64) $(armel) $(armhf) $(riscv64) $(loong64) $(s390x) $(ppc64le) $(i386) $(windows) $(darwin-amd64) $(darwin-arm64)
.PHONY: package
package: docs config $(include_packages)
# One rule per package artifact: run a platform-specific install into DESTDIR
# (see the per-target export blocks below), then wrap the tree as rpm/deb via
# fpm, or as a zip/tar.gz archive. Windows zips additionally regenerate the
# versioninfo resource first.
.PHONY: $(include_packages)
$(include_packages):
	if [ "$(suffix $@)" = ".zip" ]; then go generate cmd/telegraf/telegraf_windows.go; fi
	@$(MAKE) install
	@mkdir -p $(pkgdir)
	@if [ "$(suffix $@)" = ".rpm" ]; then \
		echo "# DO NOT EDIT OR REMOVE" > $(DESTDIR)$(sysconfdir)/telegraf/telegraf.d/.ignore; \
		echo "# This file prevents the rpm from changing permissions on this directory" >> $(DESTDIR)$(sysconfdir)/telegraf/telegraf.d/.ignore; \
		fpm --force \
			--log info \
			--architecture $(basename $@) \
			--input-type dir \
			--output-type rpm \
			--vendor InfluxData \
			--url https://github.com/influxdata/telegraf \
			--license MIT \
			--maintainer support@influxdb.com \
			--config-files /etc/telegraf/telegraf.conf \
			--config-files /etc/telegraf/telegraf.d/.ignore \
			--config-files /etc/logrotate.d/telegraf \
			--after-install scripts/rpm/post-install.sh \
			--before-install scripts/rpm/pre-install.sh \
			--after-remove scripts/rpm/post-remove.sh \
			--description "Plugin-driven server agent for reporting metrics into InfluxDB." \
			--depends coreutils \
			--rpm-digest sha256 \
			--rpm-posttrans scripts/rpm/post-install.sh \
			--rpm-os ${GOOS} \
			--rpm-tag "Requires(pre): /usr/sbin/useradd" \
			--name telegraf \
			--version $(version) \
			--iteration $(rpm_iteration) \
			--chdir $(DESTDIR) \
			--package $(pkgdir)/telegraf-$(rpm_version).$@ ;\
	elif [ "$(suffix $@)" = ".deb" ]; then \
		fpm --force \
			--log info \
			--architecture $(basename $@) \
			--input-type dir \
			--output-type deb \
			--vendor InfluxData \
			--url https://github.com/influxdata/telegraf \
			--license MIT \
			--maintainer support@influxdb.com \
			--config-files /etc/telegraf/telegraf.conf.sample \
			--config-files /etc/logrotate.d/telegraf \
			--after-install scripts/deb/post-install.sh \
			--before-install scripts/deb/pre-install.sh \
			--after-remove scripts/deb/post-remove.sh \
			--before-remove scripts/deb/pre-remove.sh \
			--description "Plugin-driven server agent for reporting metrics into InfluxDB." \
			--name telegraf \
			--version $(version) \
			--iteration $(deb_iteration) \
			--chdir $(DESTDIR) \
			--package $(pkgdir)/telegraf_$(deb_version)_$@ ;\
	elif [ "$(suffix $@)" = ".zip" ]; then \
		(cd $(dir $(DESTDIR)) && zip -r - ./*) > $(pkgdir)/telegraf-$(tar_version)_$@ ;\
	elif [ "$(suffix $@)" = ".gz" ]; then \
		tar --owner 0 --group 0 -czvf $(pkgdir)/telegraf-$(tar_version)_$@ -C $(dir $(DESTDIR)) . ;\
	fi
# Per-artifact cross-compilation settings: each package name exports the
# GOOS/GOARCH (and GOARM where relevant) it must be built with.
amd64.deb x86_64.rpm linux_amd64.tar.gz: export GOOS := linux
amd64.deb x86_64.rpm linux_amd64.tar.gz: export GOARCH := amd64
i386.deb i386.rpm linux_i386.tar.gz: export GOOS := linux
i386.deb i386.rpm linux_i386.tar.gz: export GOARCH := 386
armel.deb armel.rpm linux_armel.tar.gz: export GOOS := linux
armel.deb armel.rpm linux_armel.tar.gz: export GOARCH := arm
armel.deb armel.rpm linux_armel.tar.gz: export GOARM := 5
armhf.deb armv6hl.rpm linux_armhf.tar.gz: export GOOS := linux
armhf.deb armv6hl.rpm linux_armhf.tar.gz: export GOARCH := arm
armhf.deb armv6hl.rpm linux_armhf.tar.gz: export GOARM := 6
arm64.deb aarch64.rpm linux_arm64.tar.gz: export GOOS := linux
arm64.deb aarch64.rpm linux_arm64.tar.gz: export GOARCH := arm64
# NOTE(review): the Go toolchain only consults GOARM when GOARCH=arm; here it
# also ends up in the DESTDIR/buildbin paths below — confirm before removing.
arm64.deb aarch64.rpm linux_arm64.tar.gz: export GOARM := 7
mips.deb linux_mips.tar.gz: export GOOS := linux
mips.deb linux_mips.tar.gz: export GOARCH := mips
mipsel.deb linux_mipsel.tar.gz: export GOOS := linux
mipsel.deb linux_mipsel.tar.gz: export GOARCH := mipsle
riscv64.deb riscv64.rpm linux_riscv64.tar.gz: export GOOS := linux
riscv64.deb riscv64.rpm linux_riscv64.tar.gz: export GOARCH := riscv64
loong64.deb loong64.rpm linux_loong64.tar.gz: export GOOS := linux
loong64.deb loong64.rpm linux_loong64.tar.gz: export GOARCH := loong64
s390x.deb s390x.rpm linux_s390x.tar.gz: export GOOS := linux
s390x.deb s390x.rpm linux_s390x.tar.gz: export GOARCH := s390x
ppc64el.deb ppc64le.rpm linux_ppc64le.tar.gz: export GOOS := linux
ppc64el.deb ppc64le.rpm linux_ppc64le.tar.gz: export GOARCH := ppc64le
freebsd_amd64.tar.gz: export GOOS := freebsd
freebsd_amd64.tar.gz: export GOARCH := amd64
freebsd_i386.tar.gz: export GOOS := freebsd
freebsd_i386.tar.gz: export GOARCH := 386
freebsd_armv7.tar.gz: export GOOS := freebsd
freebsd_armv7.tar.gz: export GOARCH := arm
freebsd_armv7.tar.gz: export GOARM := 7
windows_amd64.zip: export GOOS := windows
windows_amd64.zip: export GOARCH := amd64
windows_arm64.zip: export GOOS := windows
windows_arm64.zip: export GOARCH := arm64
darwin_amd64.tar.gz: export GOOS := darwin
darwin_amd64.tar.gz: export GOARCH := amd64
darwin_arm64.tar.gz: export GOOS := darwin
darwin_arm64.tar.gz: export GOARCH := arm64
windows_i386.zip: export GOOS := windows
windows_i386.zip: export GOARCH := 386
# Windows zip archives are flat: everything installs into the archive root.
windows_i386.zip windows_amd64.zip windows_arm64.zip: export prefix =
windows_i386.zip windows_amd64.zip windows_arm64.zip: export bindir = $(prefix)
windows_i386.zip windows_amd64.zip windows_arm64.zip: export sysconfdir = $(prefix)
windows_i386.zip windows_amd64.zip windows_arm64.zip: export localstatedir = $(prefix)
windows_i386.zip windows_amd64.zip windows_arm64.zip: export EXEEXT := .exe
# Packaging-flavour specific install layout used by `make install`.
%.deb: export pkg := deb
%.deb: export prefix := /usr
%.deb: export conf_suffix := .sample
%.deb: export sysconfdir := /etc
%.deb: export localstatedir := /var
%.rpm: export pkg := rpm
%.rpm: export prefix := /usr
%.rpm: export sysconfdir := /etc
%.rpm: export localstatedir := /var
%.tar.gz: export pkg := tar
%.tar.gz: export prefix := /usr
%.tar.gz: export sysconfdir := /etc
%.tar.gz: export localstatedir := /var
%.zip: export pkg := zip
%.zip: export prefix := /
# Shared staging/build locations; release binaries are stripped (-w -s).
%.deb %.rpm %.tar.gz %.zip: export DESTDIR = build/$(GOOS)-$(GOARCH)$(GOARM)-$(pkg)/telegraf-$(version)
%.deb %.rpm %.tar.gz %.zip: export buildbin = build/$(GOOS)-$(GOARCH)$(GOARM)/telegraf$(EXEEXT)
%.deb %.rpm %.tar.gz %.zip: export LDFLAGS = -w -s

126
README.md Normal file
View file

@ -0,0 +1,126 @@
# ![tiger](assets/TelegrafTigerSmall.png "tiger") Telegraf
[![GoDoc](https://img.shields.io/badge/doc-reference-00ADD8.svg?logo=go)](https://godoc.org/github.com/influxdata/telegraf)
[![Docker pulls](https://img.shields.io/docker/pulls/library/telegraf.svg)](https://hub.docker.com/_/telegraf/)
[![Go Report Card](https://goreportcard.com/badge/github.com/influxdata/telegraf)](https://goreportcard.com/report/github.com/influxdata/telegraf)
[![Circle CI](https://circleci.com/gh/influxdata/telegraf.svg?style=svg)](https://circleci.com/gh/influxdata/telegraf)
Telegraf is an agent for collecting, processing, aggregating, and writing
metrics, logs, and other arbitrary data.
* Offers a comprehensive suite of over 300 plugins, covering a wide range of
functionalities including system monitoring, cloud services, and message
passing
* Enables the integration of user-defined code to collect, transform, and
transmit data efficiently
* Compiles into a standalone static binary without any external dependencies,
ensuring a streamlined deployment process
* Utilizes TOML for configuration, providing a user-friendly and unambiguous
setup experience
* Developed with contributions from a diverse community of over 1,200
contributors
Users can choose plugins from a wide range of topics, including but not limited
to:
* Devices: [OPC UA][], [Modbus][]
* Logs: [File][], [Tail][], [Directory Monitor][]
* Messaging: [AMQP][], [Kafka][], [MQTT][]
* Monitoring: [OpenTelemetry][], [Prometheus][]
* Networking: [Cisco TelemetryMDT][], [gNMI][]
* System monitoring: [CPU][], [Memory][], [Disk][], [Network][], [SMART][],
[Docker][], [Nvidia SMI][], etc.
* Universal: [Exec][], [HTTP][], [HTTP Listener][], [SNMP][], [SQL][]
* Windows: [Event Log][], [Management Instrumentation][],
[Performance Counters][]
## 🔨 Installation
For binary builds, Docker images, RPM & DEB packages, and other builds of
Telegraf, please see the [install guide](/docs/INSTALL_GUIDE.md).
See the [releases documentation](/docs/RELEASES.md) for details on versioning
and when releases are made.
## 💻 Usage
Users define a TOML configuration with the plugins and settings they wish to
use, then pass that configuration to Telegraf. The Telegraf agent then
collects data from inputs at each interval and sends data to outputs at each
flush interval.
For a basic walkthrough see [quick start](/docs/QUICK_START.md).
## 📖 Documentation
For a full list of documentation including tutorials, reference and other
material, start with the [/docs directory](/docs/README.md).
Additionally, each plugin has its own README that includes details about how to
configure, use, and sometimes debug or troubleshoot. Look under the
[/plugins directory](/plugins/) for specific plugins.
Here are some commonly used documents:
* [Changelog](/CHANGELOG.md)
* [Configuration](/docs/CONFIGURATION.md)
* [FAQ](/docs/FAQ.md)
* [Releases](https://github.com/influxdata/telegraf/releases)
* [Security](/SECURITY.md)
## ❤️ Contribute
[![Contribute](https://img.shields.io/badge/contribute-to_telegraf-blue.svg?logo=influxdb)](https://github.com/influxdata/telegraf/blob/master/CONTRIBUTING.md)
We love our community of over 1,200 contributors! Many of the plugins included
in Telegraf were originally contributed by community members. Check out
our [contributing guide](CONTRIBUTING.md) if you are interested in helping out.
Also, join us on our [Community Slack](https://influxdata.com/slack) or
[Community Forums](https://community.influxdata.com/) if you have questions or
comments for our engineering teams.
If you are completely new to Telegraf and InfluxDB, you can also enroll for free
at [InfluxDB university](https://www.influxdata.com/university/) to take courses
to learn more.
## Support
[![Slack](https://img.shields.io/badge/slack-join_chat-blue.svg?logo=slack)](https://www.influxdata.com/slack)
[![Forums](https://img.shields.io/badge/discourse-join_forums-blue.svg?logo=discourse)](https://community.influxdata.com/)
Please use the [Community Slack](https://influxdata.com/slack) or
[Community Forums](https://community.influxdata.com/) if you have questions or
comments for our engineering teams. GitHub issues are limited to actual issues
and feature requests only.
## 📜 License
[![MIT](https://img.shields.io/badge/license-MIT-blue)](https://github.com/influxdata/telegraf/blob/master/LICENSE)
[OPC UA]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/opcua
[Modbus]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/modbus
[File]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/file
[Tail]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/tail
[Directory Monitor]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/directory_monitor
[AMQP]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/amqp_consumer
[Kafka]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/kafka_consumer
[MQTT]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mqtt_consumer
[OpenTelemetry]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/opentelemetry
[Prometheus]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/prometheus
[Cisco TelemetryMDT]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/cisco_telemetry_mdt
[gNMI]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/gnmi
[CPU]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/cpu
[Memory]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mem
[Disk]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/disk
[Network]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/net
[SMART]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/smartctl
[Docker]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/docker
[Nvidia SMI]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nvidia_smi
[Exec]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/exec
[HTTP]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/http
[HTTP Listener]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/http_listener_v2
[SNMP]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/snmp
[SQL]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sql
[Event Log]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/win_eventlog
[Management Instrumentation]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/win_wmi
[Performance Counters]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/win_perf_counters

11
SECURITY.md Normal file
View file

@ -0,0 +1,11 @@
# Security Policy
## Reporting a Vulnerability
InfluxData takes security and our users' trust seriously. If you believe you
have found a security issue in any of our open source projects, please
responsibly disclose it by contacting `security@influxdata.com`. More details
about security vulnerability reporting can be found on the
[InfluxData How to Report Vulnerabilities page][InfluxData Security].
[InfluxData Security]: https://www.influxdata.com/how-to-report-security-vulnerabilities/

93
accumulator.go Normal file
View file

@ -0,0 +1,93 @@
package telegraf
import (
"time"
)
// Accumulator allows adding metrics to the processing flow.
type Accumulator interface {
	// AddFields adds a metric to the accumulator with the given measurement
	// name, fields, and tags (and timestamp). If a timestamp is not provided,
	// then the accumulator sets it to "now".
	AddFields(measurement string,
		fields map[string]interface{},
		tags map[string]string,
		t ...time.Time)
	// AddGauge is the same as AddFields, but will add the metric as a "Gauge" type
	AddGauge(measurement string,
		fields map[string]interface{},
		tags map[string]string,
		t ...time.Time)
	// AddCounter is the same as AddFields, but will add the metric as a "Counter" type
	AddCounter(measurement string,
		fields map[string]interface{},
		tags map[string]string,
		t ...time.Time)
	// AddSummary is the same as AddFields, but will add the metric as a "Summary" type
	AddSummary(measurement string,
		fields map[string]interface{},
		tags map[string]string,
		t ...time.Time)
	// AddHistogram is the same as AddFields, but will add the metric as a "Histogram" type
	AddHistogram(measurement string,
		fields map[string]interface{},
		tags map[string]string,
		t ...time.Time)
	// AddMetric adds an already-constructed metric to the accumulator.
	AddMetric(Metric)
	// SetPrecision sets the timestamp rounding precision. All metrics
	// added to the accumulator will have their timestamp rounded to the
	// nearest multiple of precision.
	SetPrecision(precision time.Duration)
	// AddError reports a runtime error from the plugin.
	AddError(err error)
	// WithTracking upgrades this accumulator to a TrackingAccumulator with
	// space for maxTracked metrics/batches.
	WithTracking(maxTracked int) TrackingAccumulator
}
// TrackingID uniquely identifies a tracked metric group
type TrackingID uint64

// TrackingData describes a tracked metric group: its identifier and how many
// live references to that identifier remain.
type TrackingData interface {
	// ID is the TrackingID
	ID() TrackingID
	// RefCount is the number of tracking metrics still persistent and referencing this tracking ID
	RefCount() int32
}
// DeliveryInfo provides the results of a delivered metric group.
type DeliveryInfo interface {
	// ID is the TrackingID of the delivered group.
	ID() TrackingID
	// Delivered returns true if the metric was processed successfully.
	Delivered() bool
}
// TrackingAccumulator is an Accumulator that provides a signal when the
// metric has been fully processed. Sending more metrics than the accumulator
// has been allocated for without reading status from the Accepted or Rejected
// channels is an error.
type TrackingAccumulator interface {
	Accumulator
	// AddTrackingMetric adds the Metric and arranges for tracking feedback
	// after processing.
	AddTrackingMetric(m Metric) TrackingID
	// AddTrackingMetricGroup adds a group of Metrics and arranges for a
	// signal when the group has been processed.
	AddTrackingMetricGroup(group []Metric) TrackingID
	// Delivered returns a channel that will contain the tracking results.
	Delivered() <-chan DeliveryInfo
}

6
agent/README.md Normal file
View file

@ -0,0 +1,6 @@
# Agent
For a complete list of configuration options and details about the agent, please
see the [configuration][] document's agent section.
[configuration]: ../docs/CONFIGURATION.md#agent

160
agent/accumulator.go Normal file
View file

@ -0,0 +1,160 @@
package agent
import (
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
)
// MetricMaker prepares metrics for emission on behalf of a plugin and exposes
// the plugin's log name and logger.
type MetricMaker interface {
	// LogName returns the name identifying the plugin in log output.
	LogName() string
	// MakeMetric transforms a metric before emission; returning nil drops it.
	MakeMetric(m telegraf.Metric) telegraf.Metric
	// Log returns the plugin's logger.
	Log() telegraf.Logger
}
// accumulator routes metrics from a plugin through its MetricMaker into the
// shared metrics channel, rounding timestamps to the configured precision.
type accumulator struct {
	maker   MetricMaker
	metrics chan<- telegraf.Metric
	// precision is the timestamp rounding granularity
	// (time.Nanosecond by default, see NewAccumulator).
	precision time.Duration
}
// NewAccumulator returns an Accumulator that feeds metrics produced via the
// given maker into the metrics channel, with nanosecond timestamp precision.
func NewAccumulator(
	maker MetricMaker,
	metrics chan<- telegraf.Metric,
) telegraf.Accumulator {
	return &accumulator{
		maker:     maker,
		metrics:   metrics,
		precision: time.Nanosecond,
	}
}
// AddFields adds an untyped metric built from the given measurement name,
// fields, tags and optional timestamp.
func (ac *accumulator) AddFields(
	measurement string,
	fields map[string]interface{},
	tags map[string]string,
	t ...time.Time,
) {
	ac.addMeasurement(measurement, tags, fields, telegraf.Untyped, t...)
}

// AddGauge is AddFields with the metric typed as telegraf.Gauge.
func (ac *accumulator) AddGauge(
	measurement string,
	fields map[string]interface{},
	tags map[string]string,
	t ...time.Time,
) {
	ac.addMeasurement(measurement, tags, fields, telegraf.Gauge, t...)
}

// AddCounter is AddFields with the metric typed as telegraf.Counter.
func (ac *accumulator) AddCounter(
	measurement string,
	fields map[string]interface{},
	tags map[string]string,
	t ...time.Time,
) {
	ac.addMeasurement(measurement, tags, fields, telegraf.Counter, t...)
}

// AddSummary is AddFields with the metric typed as telegraf.Summary.
func (ac *accumulator) AddSummary(
	measurement string,
	fields map[string]interface{},
	tags map[string]string,
	t ...time.Time,
) {
	ac.addMeasurement(measurement, tags, fields, telegraf.Summary, t...)
}

// AddHistogram is AddFields with the metric typed as telegraf.Histogram.
func (ac *accumulator) AddHistogram(
	measurement string,
	fields map[string]interface{},
	tags map[string]string,
	t ...time.Time,
) {
	ac.addMeasurement(measurement, tags, fields, telegraf.Histogram, t...)
}
// AddMetric rounds the metric's own timestamp to the configured precision,
// runs it through the maker, and forwards it unless the maker returns nil.
func (ac *accumulator) AddMetric(m telegraf.Metric) {
	m.SetTime(m.Time().Round(ac.precision))
	if m := ac.maker.MakeMetric(m); m != nil {
		ac.metrics <- m
	}
}
// addMeasurement builds a metric of the given value type from the supplied
// parts (timestamp resolved via getTime), runs it through the maker, and
// forwards it unless the maker drops it by returning nil.
func (ac *accumulator) addMeasurement(
	measurement string,
	tags map[string]string,
	fields map[string]interface{},
	tp telegraf.ValueType,
	t ...time.Time,
) {
	m := metric.New(measurement, tags, fields, ac.getTime(t), tp)
	if m := ac.maker.MakeMetric(m); m != nil {
		ac.metrics <- m
	}
}
// AddError passes a runtime error to the accumulator.
// The error will be tagged with the plugin name and written to the log.
// A nil error is silently ignored.
func (ac *accumulator) AddError(err error) {
	if err == nil {
		return
	}
	ac.maker.Log().Errorf("Error in plugin: %v", err)
}
// SetPrecision sets the rounding granularity applied to metric timestamps.
func (ac *accumulator) SetPrecision(precision time.Duration) {
	ac.precision = precision
}
// getTime returns the first supplied timestamp, or the current time when none
// was given, rounded to the accumulator's configured precision.
func (ac *accumulator) getTime(t []time.Time) time.Time {
	if len(t) == 0 {
		return time.Now().Round(ac.precision)
	}
	return t[0].Round(ac.precision)
}
// WithTracking wraps the accumulator so callers can receive delivery
// notifications; maxTracked sizes the buffered delivery channel.
func (ac *accumulator) WithTracking(maxTracked int) telegraf.TrackingAccumulator {
	return &trackingAccumulator{
		Accumulator: ac,
		delivered:   make(chan telegraf.DeliveryInfo, maxTracked),
	}
}
// trackingAccumulator decorates an Accumulator with per-metric (or per-group)
// delivery notifications reported on the delivered channel.
type trackingAccumulator struct {
	telegraf.Accumulator
	delivered chan telegraf.DeliveryInfo
}
// AddTrackingMetric registers delivery tracking on m, adds it, and returns
// the ID that will later appear on the Delivered channel.
func (a *trackingAccumulator) AddTrackingMetric(m telegraf.Metric) telegraf.TrackingID {
	dm, id := metric.WithTracking(m, a.onDelivery)
	a.AddMetric(dm)
	return id
}

// AddTrackingMetricGroup tracks the whole group under a single ID and adds
// each member; the ID is reported once the group has been processed.
func (a *trackingAccumulator) AddTrackingMetricGroup(group []telegraf.Metric) telegraf.TrackingID {
	db, id := metric.WithGroupTracking(group, a.onDelivery)
	for _, m := range db {
		a.AddMetric(m)
	}
	return id
}
// Delivered returns the channel on which tracking results are reported.
func (a *trackingAccumulator) Delivered() <-chan telegraf.DeliveryInfo {
	return a.delivered
}

// onDelivery is the callback invoked by the metric tracking machinery; it
// panics when more metrics were tracked than the capacity requested via
// WithTracking allows.
func (a *trackingAccumulator) onDelivery(info telegraf.DeliveryInfo) {
	select {
	case a.delivered <- info:
	default:
		// This is a programming error in the input. More items were sent for
		// tracking than space requested.
		panic("channel is full")
	}
}

160
agent/accumulator_test.go Normal file
View file

@ -0,0 +1,160 @@
package agent
import (
"bytes"
"errors"
"os"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/logger"
"github.com/influxdata/telegraf/testutil"
)
// TestAddFields verifies that a metric added through the accumulator keeps
// its name, fields, tags, timestamp and value type.
// NOTE(review): despite the name, this test exercises AddCounter (and asserts
// telegraf.Counter) — consider renaming or switching the call to AddFields.
func TestAddFields(t *testing.T) {
	metrics := make(chan telegraf.Metric, 10)
	defer close(metrics)
	a := NewAccumulator(&TestMetricMaker{}, metrics)
	tags := map[string]string{"foo": "bar"}
	fields := map[string]interface{}{
		"usage": float64(99),
	}
	now := time.Now()
	a.AddCounter("acctest", fields, tags, now)
	testm := <-metrics
	require.Equal(t, "acctest", testm.Name())
	actual, ok := testm.GetField("usage")
	require.True(t, ok)
	require.InDelta(t, float64(99), actual, testutil.DefaultDelta)
	actual, ok = testm.GetTag("foo")
	require.True(t, ok)
	require.Equal(t, "bar", actual)
	tm := testm.Time()
	// okay if monotonic clock differs
	require.True(t, now.Equal(tm))
	tp := testm.Type()
	require.Equal(t, telegraf.Counter, tp)
}
// TestAccAddError checks that AddError writes each error to the log, tagged
// with the plugin name, one error per line.
func TestAccAddError(t *testing.T) {
	// Capture log output in a buffer for inspection.
	errBuf := bytes.NewBuffer(nil)
	logger.RedirectLogging(errBuf)
	defer logger.RedirectLogging(os.Stderr)
	metrics := make(chan telegraf.Metric, 10)
	defer close(metrics)
	a := NewAccumulator(&TestMetricMaker{}, metrics)
	a.AddError(errors.New("foo"))
	a.AddError(errors.New("bar"))
	a.AddError(errors.New("baz"))
	errs := bytes.Split(errBuf.Bytes(), []byte{'\n'})
	require.Len(t, errs, 4) // 4 because of trailing newline
	require.Contains(t, string(errs[0]), "TestPlugin")
	require.Contains(t, string(errs[0]), "foo")
	require.Contains(t, string(errs[1]), "TestPlugin")
	require.Contains(t, string(errs[1]), "bar")
	require.Contains(t, string(errs[2]), "TestPlugin")
	require.Contains(t, string(errs[2]), "baz")
}
// TestSetPrecision checks that metric timestamps are rounded to the
// configured precision, and that the default (unset) precision preserves
// nanosecond resolution.
func TestSetPrecision(t *testing.T) {
	tests := []struct {
		name      string
		unset     bool          // when true, SetPrecision is not called
		precision time.Duration // precision to configure when unset is false
		timestamp time.Time     // timestamp passed to AddFields
		expected  time.Time     // timestamp expected on the emitted metric
	}{
		{
			name:      "default precision is nanosecond",
			unset:     true,
			timestamp: time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC),
			expected:  time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC),
		},
		{
			name:      "second interval",
			precision: time.Second,
			timestamp: time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC),
			expected:  time.Date(2006, time.February, 10, 12, 0, 0, 0, time.UTC),
		},
		{
			name:      "microsecond interval",
			precision: time.Microsecond,
			timestamp: time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC),
			expected:  time.Date(2006, time.February, 10, 12, 0, 0, 82913000, time.UTC),
		},
		{
			name:      "2 second precision",
			precision: 2 * time.Second,
			timestamp: time.Date(2006, time.February, 10, 12, 0, 2, 4, time.UTC),
			expected:  time.Date(2006, time.February, 10, 12, 0, 2, 0, time.UTC),
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			metrics := make(chan telegraf.Metric, 10)
			a := NewAccumulator(&TestMetricMaker{}, metrics)
			if !tt.unset {
				a.SetPrecision(tt.precision)
			}
			a.AddFields("acctest",
				map[string]interface{}{"value": float64(101)},
				map[string]string{},
				tt.timestamp,
			)
			testm := <-metrics
			require.Equal(t, tt.expected, testm.Time())
			close(metrics)
		})
	}
}
// TestAddTrackingMetricGroupEmpty checks that tracking an empty metric group
// is reported as delivered immediately rather than waiting on any metric.
func TestAddTrackingMetricGroupEmpty(t *testing.T) {
	ch := make(chan telegraf.Metric, 10)
	metrics := make([]telegraf.Metric, 0)
	acc := NewAccumulator(&TestMetricMaker{}, ch).WithTracking(1)
	id := acc.AddTrackingMetricGroup(metrics)
	select {
	case tracking := <-acc.Delivered():
		require.Equal(t, tracking.ID(), id)
	default:
		t.Fatal("empty group should be delivered immediately")
	}
}
// TestMetricMaker is a minimal metric-maker stub used by the accumulator
// tests; it passes metrics through unchanged and logs as "TestPlugin".
type TestMetricMaker struct {
}
// Name returns the fixed plugin name used by the stub.
func (*TestMetricMaker) Name() string {
	return "TestPlugin"
}
// LogName returns the name used for log attribution; for the stub it is the
// same as Name.
func (tm *TestMetricMaker) LogName() string {
	return tm.Name()
}
// MakeMetric passes the metric through without modification.
func (*TestMetricMaker) MakeMetric(metric telegraf.Metric) telegraf.Metric {
	return metric
}
// Log returns a logger named after the stub plugin.
func (*TestMetricMaker) Log() telegraf.Logger {
	return logger.New("TestPlugin", "test", "")
}

1215
agent/agent.go Normal file

File diff suppressed because it is too large Load diff

19
agent/agent_posix.go Normal file
View file

@ -0,0 +1,19 @@
//go:build !windows
package agent
import (
"os"
"os/signal"
"syscall"
)
// flushSignal is the OS signal that requests an out-of-cycle flush (POSIX only).
const flushSignal = syscall.SIGUSR1
// watchForFlushSignal relays SIGUSR1 to flushRequested so the agent can
// flush outputs on demand.
func watchForFlushSignal(flushRequested chan os.Signal) {
	signal.Notify(flushRequested, flushSignal)
}
// stopListeningForFlushSignal undoes watchForFlushSignal, detaching the
// channel from signal delivery.
func stopListeningForFlushSignal(flushRequested chan os.Signal) {
	signal.Stop(flushRequested)
}

259
agent/agent_test.go Normal file
View file

@ -0,0 +1,259 @@
package agent
import (
"context"
"fmt"
"os"
"path/filepath"
"sync"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/models"
_ "github.com/influxdata/telegraf/plugins/aggregators/all"
_ "github.com/influxdata/telegraf/plugins/inputs/all"
_ "github.com/influxdata/telegraf/plugins/outputs/all"
"github.com/influxdata/telegraf/plugins/parsers/influx"
_ "github.com/influxdata/telegraf/plugins/processors/all"
"github.com/influxdata/telegraf/testutil"
)
// TestAgent_OmitHostname checks that no "host" tag is added to the global
// tags when the agent is configured with OmitHostname.
func TestAgent_OmitHostname(t *testing.T) {
	c := config.NewConfig()
	c.Agent.OmitHostname = true
	_ = NewAgent(c)
	require.NotContains(t, c.Tags, "host")
}
// TestAgent_LoadPlugin checks that input filters select the expected number
// of input plugins from the shared test configuration.
//
// Rewritten as a table-driven test: the original repeated the same four-line
// load-and-assert sequence five times, which hid the case being tested and
// made failures hard to attribute. Subtests isolate each filter combination.
func TestAgent_LoadPlugin(t *testing.T) {
	tests := []struct {
		name     string
		filters  []string // input filters applied when loading the config
		expected int      // number of inputs expected after filtering
	}{
		{"single match", []string{"mysql"}, 1},
		{"no match", []string{"foo"}, 0},
		{"match and miss", []string{"mysql", "foo"}, 1},
		{"two matches", []string{"mysql", "redis"}, 2},
		{"matches and misses", []string{"mysql", "foo", "redis", "bar"}, 2},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c := config.NewConfig()
			c.InputFilters = tt.filters
			require.NoError(t, c.LoadConfig("../config/testdata/telegraf-agent.toml"))
			a := NewAgent(c)
			require.Len(t, a.Config.Inputs, tt.expected)
		})
	}
}
// TestAgent_LoadOutput checks that output filters select the expected number
// of output plugins from the shared test configuration.
//
// Rewritten as a table-driven test: the original repeated the same
// load-and-assert sequence seven times. NOTE(review): one original case also
// asserted len(c.Outputs) before NewAgent; that assertion targeted the same
// loaded config the agent assertion covers — confirm a.Config aliases c if
// this ever diverges.
func TestAgent_LoadOutput(t *testing.T) {
	tests := []struct {
		name     string
		filters  []string // output filters applied when loading the config
		expected int      // number of outputs expected after filtering
	}{
		{"influxdb only", []string{"influxdb"}, 2},
		{"kafka only", []string{"kafka"}, 1},
		{"no filter", nil, 3},
		{"no match", []string{"foo"}, 0},
		{"match and miss", []string{"influxdb", "foo"}, 2},
		{"two matches", []string{"influxdb", "kafka"}, 3},
		{"matches and misses", []string{"influxdb", "foo", "kafka", "bar"}, 3},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c := config.NewConfig()
			c.OutputFilters = tt.filters
			require.NoError(t, c.LoadConfig("../config/testdata/telegraf-agent.toml"))
			a := NewAgent(c)
			require.Len(t, a.Config.Outputs, tt.expected)
		})
	}
}
// TestWindow checks updateWindow's since/until computation with and without
// interval rounding.
//
// Fix: the fourth case name read "no found with alignment needed"; corrected
// the typo to "no round" to match its roundInterval=false sibling cases.
func TestWindow(t *testing.T) {
	// parse converts an RFC3339 string to a time.Time, panicking on bad
	// fixtures so the table stays compact.
	parse := func(s string) time.Time {
		tm, err := time.Parse(time.RFC3339, s)
		if err != nil {
			panic(err)
		}
		return tm
	}
	tests := []struct {
		name          string
		start         time.Time     // wall-clock time handed to updateWindow
		roundInterval bool          // whether the window start is aligned down
		period        time.Duration // collection period
		since         time.Time     // expected window start
		until         time.Time     // expected window end
	}{
		{
			name:          "round with exact alignment",
			start:         parse("2018-03-27T00:00:00Z"),
			roundInterval: true,
			period:        30 * time.Second,
			since:         parse("2018-03-27T00:00:00Z"),
			until:         parse("2018-03-27T00:00:30Z"),
		},
		{
			name:          "round with alignment needed",
			start:         parse("2018-03-27T00:00:05Z"),
			roundInterval: true,
			period:        30 * time.Second,
			since:         parse("2018-03-27T00:00:00Z"),
			until:         parse("2018-03-27T00:00:30Z"),
		},
		{
			name:          "no round with exact alignment",
			start:         parse("2018-03-27T00:00:00Z"),
			roundInterval: false,
			period:        30 * time.Second,
			since:         parse("2018-03-27T00:00:00Z"),
			until:         parse("2018-03-27T00:00:30Z"),
		},
		{
			name:          "no round with alignment needed",
			start:         parse("2018-03-27T00:00:05Z"),
			roundInterval: false,
			period:        30 * time.Second,
			since:         parse("2018-03-27T00:00:05Z"),
			until:         parse("2018-03-27T00:00:35Z"),
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			since, until := updateWindow(tt.start, tt.roundInterval, tt.period)
			require.Equal(t, tt.since, since, "since")
			require.Equal(t, tt.until, until, "until")
		})
	}
}
// TestCases runs one subtest per directory in testcases/: each directory
// provides a telegraf.conf (with no outputs) and an expected.out file of
// influx line-protocol metrics; the agent is run once and its collected
// metrics are compared against the expectation.
func TestCases(t *testing.T) {
	// Get all directories in testcases
	folders, err := os.ReadDir("testcases")
	require.NoError(t, err)
	// Make sure tests contains data
	require.NotEmpty(t, folders)
	for _, f := range folders {
		// Only handle folders
		if !f.IsDir() {
			continue
		}
		fname := f.Name()
		testdataPath := filepath.Join("testcases", fname)
		configFilename := filepath.Join(testdataPath, "telegraf.conf")
		expectedFilename := filepath.Join(testdataPath, "expected.out")
		t.Run(fname, func(t *testing.T) {
			// Get parser to parse input and expected output
			parser := &influx.Parser{}
			require.NoError(t, parser.Init())
			expected, err := testutil.ParseMetricsFromFile(expectedFilename, parser)
			require.NoError(t, err)
			require.NotEmpty(t, expected)
			// Load the config and inject the mock output to be able to verify
			// the resulting metrics
			cfg := config.NewConfig()
			require.NoError(t, cfg.LoadAll(configFilename))
			require.Empty(t, cfg.Outputs, "No output(s) allowed in the config!")
			// Setup the agent and run the agent in "once" mode
			agent := NewAgent(cfg)
			ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second)
			defer cancel()
			actual, err := collect(ctx, agent, 0)
			require.NoError(t, err)
			// Process expected metrics and compare with resulting metrics
			options := []cmp.Option{
				testutil.IgnoreTags("host"),
				testutil.IgnoreTime(),
			}
			testutil.RequireMetricsEqual(t, expected, actual, options...)
		})
	}
}
// collect runs the agent in test mode and gathers every metric it produces.
//
// A drain goroutine consumes metrics from src under a mutex while
// a.runTest feeds the channel; each metric is Reject()ed so tracking state
// is settled. Returns the collected metrics, or an error if the run failed
// or any input recorded gather errors.
func collect(ctx context.Context, a *Agent, wait time.Duration) ([]telegraf.Metric, error) {
	var received []telegraf.Metric
	var mu sync.Mutex
	src := make(chan telegraf.Metric, 100)
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for m := range src {
			mu.Lock()
			received = append(received, m)
			mu.Unlock()
			m.Reject()
		}
	}()
	// runTest closes src when done, which ends the drain goroutine's range.
	if err := a.runTest(ctx, wait, src); err != nil {
		return nil, err
	}
	wg.Wait()
	if models.GlobalGatherErrors.Get() != 0 {
		return received, fmt.Errorf("input plugins recorded %d errors", models.GlobalGatherErrors.Get())
	}
	return received, nil
}

13
agent/agent_windows.go Normal file
View file

@ -0,0 +1,13 @@
//go:build windows
package agent
import "os"
// watchForFlushSignal is a no-op on Windows; there is no SIGUSR1 equivalent
// to request an out-of-cycle flush.
func watchForFlushSignal(_ chan os.Signal) {
	// not supported
}
// stopListeningForFlushSignal is a no-op on Windows, mirroring the no-op
// watchForFlushSignal.
func stopListeningForFlushSignal(_ chan os.Signal) {
	// not supported
}

View file

@ -0,0 +1,2 @@
metric value=420
metric value_min=4200,value_max=4200

View file

@ -0,0 +1 @@
metric value=42.0

View file

@ -0,0 +1,22 @@
# Test for not skipping processors after running aggregators
[agent]
omit_hostname = true
skip_processors_after_aggregators = false
[[inputs.file]]
files = ["testcases/aggregators-rerun-processors/input.influx"]
data_format = "influx"
[[processors.starlark]]
source = '''
def apply(metric):
for k, v in metric.fields.items():
if type(v) == "float":
metric.fields[k] = v * 10
return metric
'''
[[aggregators.minmax]]
period = "1s"
drop_original = false

View file

@ -0,0 +1,2 @@
metric value=420
metric value_min=420,value_max=420

View file

@ -0,0 +1 @@
metric value=42.0

View file

@ -0,0 +1,22 @@
# Test for skipping processors after running aggregators
[agent]
omit_hostname = true
skip_processors_after_aggregators = true
[[inputs.file]]
files = ["testcases/aggregators-skip-processors/input.influx"]
data_format = "influx"
[[processors.starlark]]
source = '''
def apply(metric):
for k, v in metric.fields.items():
if type(v) == "float":
metric.fields[k] = v * 10
return metric
'''
[[aggregators.minmax]]
period = "1s"
drop_original = false

View file

@ -0,0 +1,2 @@
new_metric_from_starlark,foo=bar baz=42i,timestamp="2023-07-13T12:53:54.197709713Z" 1689252834197709713
old_metric_from_mock,mood=good value=23i,timestamp="2023-07-13T13:10:34Z" 1689253834000000000

View file

@ -0,0 +1 @@
old_metric_from_mock,mood=good value=23i 1689253834000000000

View file

@ -0,0 +1,26 @@
# Test for using the appearance order in the file for processor order
[[inputs.file]]
files = ["testcases/processor-order-appearance/input.influx"]
data_format = "influx"
[[processors.starlark]]
source = '''
def apply(metric):
metrics = []
m = Metric("new_metric_from_starlark")
m.tags["foo"] = "bar"
m.fields["baz"] = 42
m.time = 1689252834197709713
metrics.append(m)
metrics.append(metric)
return metrics
'''
[[processors.date]]
field_key = "timestamp"
date_format = "2006-01-02T15:04:05.999999999Z"
timezone = "UTC"

View file

@ -0,0 +1,2 @@
new_metric_from_starlark,foo=bar baz=42i,timestamp="2023-07-13T12:53:54.197709713Z" 1689252834197709713
old_metric_from_mock,mood=good value=23i,timestamp="2023-07-13T13:10:34Z" 1689253834000000000

View file

@ -0,0 +1 @@
old_metric_from_mock,mood=good value=23i 1689253834000000000

View file

@ -0,0 +1,27 @@
# Test for specifying an explicit processor order
[[inputs.file]]
files = ["testcases/processor-order-explicit/input.influx"]
data_format = "influx"
[[processors.date]]
field_key = "timestamp"
date_format = "2006-01-02T15:04:05.999999999Z"
timezone = "UTC"
order = 2
[[processors.starlark]]
source = '''
def apply(metric):
metrics = []
m = Metric("new_metric_from_starlark")
m.tags["foo"] = "bar"
m.fields["baz"] = 42
m.time = 1689252834197709713
metrics.append(m)
metrics.append(metric)
return metrics
'''
order = 1

View file

@ -0,0 +1,2 @@
new_metric_from_starlark,foo=bar baz=42i,timestamp="2023-07-13T12:53:54.197709713Z" 1689252834197709713
old_metric_from_mock,mood=good value=23i,timestamp="2023-07-13T13:10:34Z" 1689253834000000000

View file

@ -0,0 +1 @@
old_metric_from_mock,mood=good value=23i 1689253834000000000

View file

@ -0,0 +1,25 @@
# Test for using the appearance order in the file for processor order
[[inputs.file]]
files = ["testcases/processor-order-appearance/input.influx"]
data_format = "influx"
[[processors.starlark]]
source = '''
def apply(metric):
metrics = []
m = Metric("new_metric_from_starlark")
m.tags["foo"] = "bar"
m.fields["baz"] = 42
m.time = 1689252834197709713
metrics.append(m)
metrics.append(metric)
return metrics
'''
[[processors.date]]
field_key = "timestamp"
date_format = "2006-01-02T15:04:05.999999999Z"
timezone = "UTC"
order = 1

View file

@ -0,0 +1,2 @@
new_metric_from_starlark,foo=bar baz=42i 1689252834197709713
old_metric_from_mock,mood=good value=23i,timestamp="2023-07-13T13:10:34Z" 1689253834000000000

View file

@ -0,0 +1 @@
old_metric_from_mock,mood=good value=23i 1689253834000000000

View file

@ -0,0 +1,26 @@
# Test for using the appearance order in the file for processor order.
# This will not add the "timestamp" field as the starlark processor runs _after_
# the date processor.
[[inputs.file]]
files = ["testcases/processor-order-no-starlark/input.influx"]
data_format = "influx"
[[processors.date]]
field_key = "timestamp"
date_format = "2006-01-02T15:04:05.999999999Z"
timezone = "UTC"
[[processors.starlark]]
source = '''
def apply(metric):
metrics = []
m = Metric("new_metric_from_starlark")
m.tags["foo"] = "bar"
m.fields["baz"] = 42
m.time = 1689252834197709713
metrics.append(m)
metrics.append(metric)
return metrics
'''

281
agent/tick.go Normal file
View file

@ -0,0 +1,281 @@
package agent
import (
"context"
"sync"
"time"
"github.com/benbjohnson/clock"
"github.com/influxdata/telegraf/internal"
)
// Ticker is the scheduling abstraction shared by the agent's tickers.
type Ticker interface {
	// Elapsed returns the channel on which ticks are delivered.
	Elapsed() <-chan time.Time
	// Stop cancels the ticker and waits for its goroutine to exit.
	Stop()
}
// AlignedTicker delivers ticks at aligned times plus an optional jitter. Each
// tick is realigned to avoid drift and handle changes to the system clock.
//
// The ticks may have a jitter duration applied to them as a random offset to
// the interval. However the overall pace is that of the interval, so on
// average you will have one collection each interval.
//
// The first tick is emitted at the next alignment.
//
// Ticks are dropped for slow consumers.
//
// The implementation currently does not recalculate until the next tick with
// no maximum sleep; when using large intervals, alignment is not corrected
// until the next tick.
type AlignedTicker struct {
	interval time.Duration
	jitter time.Duration
	offset time.Duration
	// minInterval guards against scheduling an exceptionally short interval
	// after minor clock adjustments (see next).
	minInterval time.Duration
	ch chan time.Time
	cancel context.CancelFunc
	wg sync.WaitGroup
}
// NewAlignedTicker creates and starts an AlignedTicker using the real clock,
// scheduling the first tick at the next alignment after now. minInterval is
// fixed at 1% of the interval.
func NewAlignedTicker(now time.Time, interval, jitter, offset time.Duration) *AlignedTicker {
	t := &AlignedTicker{
		interval: interval,
		jitter: jitter,
		offset: offset,
		minInterval: interval / 100,
	}
	t.start(now, clock.New())
	return t
}
// start initializes the tick channel and spawns the timer goroutine. The
// clock is injected so tests can substitute a mock.
func (t *AlignedTicker) start(now time.Time, clk clock.Clock) {
	// Buffer of 1 lets run deliver a tick without blocking; extra ticks are
	// dropped (see run).
	t.ch = make(chan time.Time, 1)
	ctx, cancel := context.WithCancel(context.Background())
	t.cancel = cancel
	d := t.next(now)
	timer := clk.Timer(d)
	t.wg.Add(1)
	go func() {
		defer t.wg.Done()
		t.run(ctx, timer)
	}()
}
// next computes how long to sleep until the next aligned tick, including the
// configured offset and a fresh random jitter.
func (t *AlignedTicker) next(now time.Time) time.Duration {
	// Advance by the minimum interval before aligning so that a previous
	// interval ending slightly early (minor clock changes) cannot produce an
	// exceptionally short schedule.
	aligned := internal.AlignTime(now.Add(t.minInterval), t.interval)
	wait := aligned.Sub(now)
	if wait == 0 {
		wait = t.interval
	}
	return wait + t.offset + internal.RandomDuration(t.jitter)
}
// run is the timer loop: on each expiry it offers the tick to the channel
// (dropping it if the consumer is slow) and reschedules the next aligned
// tick. It exits when the context is cancelled.
func (t *AlignedTicker) run(ctx context.Context, timer *clock.Timer) {
	for {
		select {
		case <-ctx.Done():
			timer.Stop()
			return
		case now := <-timer.C:
			// Non-blocking send: slow consumers lose ticks instead of
			// stalling the schedule.
			select {
			case t.ch <- now:
			default:
			}
			d := t.next(now)
			timer.Reset(d)
		}
	}
}
// Elapsed returns the channel on which ticks are delivered.
func (t *AlignedTicker) Elapsed() <-chan time.Time {
	return t.ch
}
// Stop cancels the timer goroutine and blocks until it has exited.
func (t *AlignedTicker) Stop() {
	t.cancel()
	t.wg.Wait()
}
// UnalignedTicker delivers ticks at regular but unaligned intervals. No
// effort is made to avoid drift.
//
// The ticks may have a jitter duration applied to them as a random offset to
// the interval. However the overall pace is that of the interval, so on
// average you will have one collection each interval.
//
// The first tick is emitted immediately.
//
// Ticks are dropped for slow consumers.
type UnalignedTicker struct {
	interval time.Duration
	jitter time.Duration
	offset time.Duration
	ch chan time.Time
	cancel context.CancelFunc
	wg sync.WaitGroup
}
// NewUnalignedTicker creates and starts an UnalignedTicker using the real
// clock.
func NewUnalignedTicker(interval, jitter, offset time.Duration) *UnalignedTicker {
	t := &UnalignedTicker{
		interval: interval,
		jitter: jitter,
		offset: offset,
	}
	t.start(clock.New())
	return t
}
// start initializes the tick channel, emits the immediate first tick (only
// when no offset is configured), and spawns the ticker goroutine. The clock
// is injected so tests can substitute a mock.
func (t *UnalignedTicker) start(clk clock.Clock) {
	t.ch = make(chan time.Time, 1)
	ctx, cancel := context.WithCancel(context.Background())
	t.cancel = cancel
	ticker := clk.Ticker(t.interval)
	if t.offset == 0 {
		// Perform initial trigger to stay backward compatible
		t.ch <- clk.Now()
	}
	t.wg.Add(1)
	go func() {
		defer t.wg.Done()
		t.run(ctx, ticker, clk)
	}()
}
// sleep blocks for the given duration on the provided clock, returning early
// with ctx.Err() if the context is cancelled first. A zero duration returns
// immediately.
func sleep(ctx context.Context, duration time.Duration, clk clock.Clock) error {
	if duration == 0 {
		return nil
	}
	timer := clk.Timer(duration)
	select {
	case <-ctx.Done():
		timer.Stop()
		return ctx.Err()
	case <-timer.C:
		return nil
	}
}
// run is the ticker loop: after each underlying tick it sleeps for the
// configured offset plus a fresh random jitter, then offers the tick to the
// channel (dropping it if the consumer is slow). It exits when the context
// is cancelled, including during the offset/jitter sleep.
func (t *UnalignedTicker) run(ctx context.Context, ticker *clock.Ticker, clk clock.Clock) {
	for {
		select {
		case <-ctx.Done():
			ticker.Stop()
			return
		case <-ticker.C:
			jitter := internal.RandomDuration(t.jitter)
			err := sleep(ctx, t.offset+jitter, clk)
			if err != nil {
				// Context cancelled mid-sleep: shut down.
				ticker.Stop()
				return
			}
			// Non-blocking send: slow consumers lose ticks.
			select {
			case t.ch <- clk.Now():
			default:
			}
		}
	}
}
// InjectTick forces an immediate tick, blocking until there is room in the
// tick channel.
func (t *UnalignedTicker) InjectTick() {
	t.ch <- time.Now()
}
// Elapsed returns the channel on which ticks are delivered.
func (t *UnalignedTicker) Elapsed() <-chan time.Time {
	return t.ch
}
// Stop cancels the ticker goroutine and blocks until it has exited.
func (t *UnalignedTicker) Stop() {
	t.cancel()
	t.wg.Wait()
}
// RollingTicker delivers ticks at regular but unaligned intervals.
//
// Because the next interval is scheduled based on the interval + jitter, you
// are guaranteed at least interval seconds without missing a tick and ticks
// will be evenly scheduled over time.
//
// On average you will have one collection each interval + (jitter/2).
//
// The first tick is emitted after interval+jitter seconds.
//
// Ticks are dropped for slow consumers.
type RollingTicker struct {
	interval time.Duration
	jitter time.Duration
	ch chan time.Time
	cancel context.CancelFunc
	wg sync.WaitGroup
}
// NewRollingTicker creates and starts a RollingTicker using the real clock.
func NewRollingTicker(interval, jitter time.Duration) *RollingTicker {
	t := &RollingTicker{
		interval: interval,
		jitter: jitter,
	}
	t.start(clock.New())
	return t
}
// start initializes the tick channel and spawns the timer goroutine; the
// first tick is scheduled interval+jitter from now. The clock is injected so
// tests can substitute a mock.
func (t *RollingTicker) start(clk clock.Clock) {
	t.ch = make(chan time.Time, 1)
	ctx, cancel := context.WithCancel(context.Background())
	t.cancel = cancel
	d := t.next()
	timer := clk.Timer(d)
	t.wg.Add(1)
	go func() {
		defer t.wg.Done()
		t.run(ctx, timer)
	}()
}
// next returns the delay before the next tick: the interval plus a fresh
// random jitter.
func (t *RollingTicker) next() time.Duration {
	return t.interval + internal.RandomDuration(t.jitter)
}
// run is the timer loop: on each expiry it offers the tick to the channel
// (dropping it if the consumer is slow) and schedules the next tick relative
// to the current one. It exits when the context is cancelled.
// NOTE(review): structurally identical to AlignedTicker.run; only next()
// differs.
func (t *RollingTicker) run(ctx context.Context, timer *clock.Timer) {
	for {
		select {
		case <-ctx.Done():
			timer.Stop()
			return
		case now := <-timer.C:
			// Non-blocking send: slow consumers lose ticks.
			select {
			case t.ch <- now:
			default:
			}
			d := t.next()
			timer.Reset(d)
		}
	}
}
// Elapsed returns the channel on which ticks are delivered.
func (t *RollingTicker) Elapsed() <-chan time.Time {
	return t.ch
}
// Stop cancels the timer goroutine and blocks until it has exited.
func (t *RollingTicker) Stop() {
	t.cancel()
	t.wg.Wait()
}

395
agent/tick_test.go Normal file
View file

@ -0,0 +1,395 @@
package agent
import (
"fmt"
"strings"
"testing"
"time"
"github.com/benbjohnson/clock"
"github.com/stretchr/testify/require"
)
// TestAlignedTicker checks, against a mock clock, that with no jitter or
// offset the ticker fires exactly on each interval boundary.
func TestAlignedTicker(t *testing.T) {
	interval := 10 * time.Second
	jitter := 0 * time.Second
	offset := 0 * time.Second
	clk := clock.NewMock()
	since := clk.Now()
	until := since.Add(60 * time.Second)
	ticker := &AlignedTicker{
		interval: interval,
		jitter: jitter,
		offset: offset,
		minInterval: interval / 100,
	}
	ticker.start(since, clk)
	defer ticker.Stop()
	expected := []time.Time{
		time.Unix(10, 0).UTC(),
		time.Unix(20, 0).UTC(),
		time.Unix(30, 0).UTC(),
		time.Unix(40, 0).UTC(),
		time.Unix(50, 0).UTC(),
		time.Unix(60, 0).UTC(),
	}
	actual := make([]time.Time, 0)
	// Advance the mock clock one interval at a time, collecting each tick.
	clk.Add(10 * time.Second)
	for !clk.Now().After(until) {
		tm := <-ticker.Elapsed()
		actual = append(actual, tm.UTC())
		clk.Add(10 * time.Second)
	}
	require.Equal(t, expected, actual)
}
// TestAlignedTickerJitter checks that with jitter enabled each tick still
// lands within the expected window around its interval boundary.
func TestAlignedTickerJitter(t *testing.T) {
	interval := 10 * time.Second
	jitter := 5 * time.Second
	offset := 0 * time.Second
	clk := clock.NewMock()
	since := clk.Now()
	until := since.Add(61 * time.Second)
	ticker := &AlignedTicker{
		interval: interval,
		jitter: jitter,
		offset: offset,
		minInterval: interval / 100,
	}
	ticker.start(since, clk)
	defer ticker.Stop()
	last := since
	for !clk.Now().After(until) {
		select {
		case tm := <-ticker.Elapsed():
			dur := tm.Sub(last)
			// 10s interval + 5s jitter + up to 1s late firing.
			require.LessOrEqual(t, dur, 16*time.Second, "expected elapsed time to be less than 16 seconds, but was %s", dur)
			require.GreaterOrEqual(t, dur, 5*time.Second, "expected elapsed time to be more than 5 seconds, but was %s", dur)
			last = last.Add(interval)
		default:
		}
		clk.Add(1 * time.Second)
	}
}
// TestAlignedTickerOffset checks that the configured offset shifts every
// aligned tick by the same fixed amount.
func TestAlignedTickerOffset(t *testing.T) {
	interval := 10 * time.Second
	jitter := 0 * time.Second
	offset := 3 * time.Second
	clk := clock.NewMock()
	since := clk.Now()
	until := since.Add(61 * time.Second)
	ticker := &AlignedTicker{
		interval: interval,
		jitter: jitter,
		offset: offset,
		minInterval: interval / 100,
	}
	ticker.start(since, clk)
	defer ticker.Stop()
	expected := []time.Time{
		time.Unix(13, 0).UTC(),
		time.Unix(23, 0).UTC(),
		time.Unix(33, 0).UTC(),
		time.Unix(43, 0).UTC(),
		time.Unix(53, 0).UTC(),
	}
	actual := make([]time.Time, 0)
	// First advance covers interval + offset; afterwards one interval apart.
	clk.Add(10*time.Second + offset)
	for !clk.Now().After(until) {
		tm := <-ticker.Elapsed()
		actual = append(actual, tm.UTC())
		clk.Add(10 * time.Second)
	}
	require.Equal(t, expected, actual)
}
// TestAlignedTickerMissedTick checks that when the consumer falls behind,
// intermediate ticks are dropped and the schedule realigns to the interval.
func TestAlignedTickerMissedTick(t *testing.T) {
	interval := 10 * time.Second
	jitter := 0 * time.Second
	offset := 0 * time.Second
	clk := clock.NewMock()
	since := clk.Now()
	ticker := &AlignedTicker{
		interval: interval,
		jitter: jitter,
		offset: offset,
		minInterval: interval / 100,
	}
	ticker.start(since, clk)
	defer ticker.Stop()
	// Jump past two boundaries: only the first tick (t=10s) is buffered.
	clk.Add(25 * time.Second)
	tm := <-ticker.Elapsed()
	require.Equal(t, time.Unix(10, 0).UTC(), tm.UTC())
	// The next tick realigns to the following boundary (t=30s).
	clk.Add(5 * time.Second)
	tm = <-ticker.Elapsed()
	require.Equal(t, time.Unix(30, 0).UTC(), tm.UTC())
}
// TestUnalignedTicker checks that the ticker fires immediately at start and
// then once per interval, without aligning to wall-clock boundaries.
func TestUnalignedTicker(t *testing.T) {
	interval := 10 * time.Second
	jitter := 0 * time.Second
	offset := 0 * time.Second
	clk := clock.NewMock()
	// Start the mock clock at t=1s so ticks land off the aligned boundaries.
	clk.Add(1 * time.Second)
	since := clk.Now()
	until := since.Add(60 * time.Second)
	ticker := &UnalignedTicker{
		interval: interval,
		jitter: jitter,
		offset: offset,
	}
	ticker.start(clk)
	defer ticker.Stop()
	expected := []time.Time{
		time.Unix(1, 0).UTC(),
		time.Unix(11, 0).UTC(),
		time.Unix(21, 0).UTC(),
		time.Unix(31, 0).UTC(),
		time.Unix(41, 0).UTC(),
		time.Unix(51, 0).UTC(),
		time.Unix(61, 0).UTC(),
	}
	actual := make([]time.Time, 0)
	for !clk.Now().After(until) {
		select {
		case tm := <-ticker.Elapsed():
			actual = append(actual, tm.UTC())
		default:
		}
		clk.Add(10 * time.Second)
	}
	require.Equal(t, expected, actual)
}
// TestRollingTicker checks the basic RollingTicker schedule: with no jitter
// the first tick arrives one full interval after start, and ticks continue
// one interval apart.
//
// Fix: the original constructed an UnalignedTicker here (copy-paste from
// TestUnalignedTicker), so the RollingTicker schedule was never exercised by
// this test. It now builds a RollingTicker and expects no immediate first
// tick, per the type's documented behavior.
func TestRollingTicker(t *testing.T) {
	interval := 10 * time.Second
	jitter := 0 * time.Second
	clk := clock.NewMock()
	clk.Add(1 * time.Second)
	since := clk.Now()
	until := since.Add(60 * time.Second)
	ticker := &RollingTicker{
		interval: interval,
		jitter: jitter,
	}
	ticker.start(clk)
	defer ticker.Stop()
	// First tick fires interval after start (t=1s), then every interval.
	expected := []time.Time{
		time.Unix(11, 0).UTC(),
		time.Unix(21, 0).UTC(),
		time.Unix(31, 0).UTC(),
		time.Unix(41, 0).UTC(),
		time.Unix(51, 0).UTC(),
		time.Unix(61, 0).UTC(),
	}
	actual := make([]time.Time, 0)
	for !clk.Now().After(until) {
		select {
		case tm := <-ticker.Elapsed():
			actual = append(actual, tm.UTC())
		default:
		}
		clk.Add(10 * time.Second)
	}
	require.Equal(t, expected, actual)
}
// Simulates running the Ticker for an hour and displays stats about the
// operation.
//
// TestAlignedTickerDistribution checks that over a simulated hour the
// jittered aligned ticker still averages roughly one tick per interval.
func TestAlignedTickerDistribution(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}
	interval := 10 * time.Second
	jitter := 5 * time.Second
	offset := 0 * time.Second
	clk := clock.NewMock()
	since := clk.Now()
	ticker := &AlignedTicker{
		interval: interval,
		jitter: jitter,
		offset: offset,
		minInterval: interval / 100,
	}
	ticker.start(since, clk)
	defer ticker.Stop()
	dist := simulatedDist(ticker, clk)
	printDist(dist)
	// ~360 intervals fit in an hour; allow some slack for jitter.
	require.Less(t, 350, dist.Count)
	require.True(t, 9 < dist.Mean() && dist.Mean() < 11)
}
// TestAlignedTickerDistributionWithOffset checks that a constant offset does
// not change the average pace of the jittered aligned ticker over a
// simulated hour.
func TestAlignedTickerDistributionWithOffset(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}
	interval := 10 * time.Second
	jitter := 5 * time.Second
	offset := 3 * time.Second
	clk := clock.NewMock()
	since := clk.Now()
	ticker := &AlignedTicker{
		interval: interval,
		jitter: jitter,
		offset: offset,
		minInterval: interval / 100,
	}
	ticker.start(since, clk)
	defer ticker.Stop()
	dist := simulatedDist(ticker, clk)
	printDist(dist)
	require.Less(t, 350, dist.Count)
	require.True(t, 9 < dist.Mean() && dist.Mean() < 11)
}
// Simulates running the Ticker for an hour and displays stats about the
// operation.
//
// TestUnalignedTickerDistribution checks that over a simulated hour the
// jittered unaligned ticker averages roughly one tick per interval.
func TestUnalignedTickerDistribution(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}
	interval := 10 * time.Second
	jitter := 5 * time.Second
	offset := 0 * time.Second
	clk := clock.NewMock()
	ticker := &UnalignedTicker{
		interval: interval,
		jitter: jitter,
		offset: offset,
	}
	ticker.start(clk)
	defer ticker.Stop()
	dist := simulatedDist(ticker, clk)
	printDist(dist)
	require.Less(t, 350, dist.Count)
	require.True(t, 9 < dist.Mean() && dist.Mean() < 11)
}
// TestUnalignedTickerDistributionWithOffset checks that a constant offset
// does not change the average pace of the jittered unaligned ticker over a
// simulated hour.
func TestUnalignedTickerDistributionWithOffset(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}
	interval := 10 * time.Second
	jitter := 5 * time.Second
	offset := 3 * time.Second
	clk := clock.NewMock()
	ticker := &UnalignedTicker{
		interval: interval,
		jitter: jitter,
		offset: offset,
	}
	ticker.start(clk)
	defer ticker.Stop()
	dist := simulatedDist(ticker, clk)
	printDist(dist)
	require.Less(t, 350, dist.Count)
	require.True(t, 9 < dist.Mean() && dist.Mean() < 11)
}
// Simulates running the Ticker for an hour and displays stats about the
// operation.
//
// TestRollingTickerDistribution checks that the rolling ticker's average
// spacing is interval + jitter/2 (12-13s here), as documented on the type.
func TestRollingTickerDistribution(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}
	interval := 10 * time.Second
	jitter := 5 * time.Second
	clk := clock.NewMock()
	ticker := &RollingTicker{
		interval: interval,
		jitter: jitter,
	}
	ticker.start(clk)
	defer ticker.Stop()
	dist := simulatedDist(ticker, clk)
	printDist(dist)
	require.Less(t, 275, dist.Count)
	require.True(t, 12 < dist.Mean() && 13 > dist.Mean())
}
// Distribution accumulates tick statistics from a simulated run.
type Distribution struct {
	// Buckets counts ticks by the second-of-minute at which they fired.
	Buckets [60]int
	// Count is the total number of ticks observed.
	Count int
	// Waittime is the sum of inter-tick gaps in seconds.
	Waittime float64
}
// Mean returns the average gap between ticks in seconds.
// NOTE(review): divides by Count without a zero guard; callers only invoke
// it after a simulation has produced ticks.
func (d *Distribution) Mean() float64 {
	return d.Waittime / float64(d.Count)
}
// printDist prints an ASCII histogram of tick arrival seconds plus the mean
// interval and total count, for manual inspection of the distribution tests.
func printDist(dist Distribution) {
	for i, count := range dist.Buckets {
		fmt.Printf("%2d %s\n", i, strings.Repeat("x", count))
	}
	fmt.Printf("Average interval: %f\n", dist.Mean())
	fmt.Printf("Count: %d\n", dist.Count)
}
// simulatedDist advances the mock clock through one simulated hour, one
// second at a time when no tick is pending, and records every tick the
// ticker delivers into a Distribution.
func simulatedDist(ticker Ticker, clk *clock.Mock) Distribution {
	since := clk.Now()
	until := since.Add(1 * time.Hour)
	var dist Distribution
	last := clk.Now()
	for !clk.Now().After(until) {
		select {
		case tm := <-ticker.Elapsed():
			dist.Buckets[tm.Second()]++
			dist.Count++
			dist.Waittime += tm.Sub(last).Seconds()
			last = tm
		default:
			// No tick ready: advance simulated time.
			clk.Add(1 * time.Second)
		}
	}
	return dist
}

18
aggregator.go Normal file
View file

@ -0,0 +1,18 @@
package telegraf
// Aggregator is an interface for implementing an Aggregator plugin.
// The RunningAggregator wraps this interface and guarantees that
// Add, Push, and Reset can not be called concurrently, so locking is not
// required when implementing an Aggregator plugin.
type Aggregator interface {
	PluginDescriber
	// Add the metric to the aggregator.
	Add(in Metric)
	// Push pushes the current aggregates to the accumulator.
	Push(acc Accumulator)
	// Reset resets the aggregators caches and aggregates.
	Reset()
}

BIN
assets/GopherAndTiger.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 72 KiB

BIN
assets/TelegrafTiger.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 7.6 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.4 KiB

BIN
assets/windows/icon.icns Normal file

Binary file not shown.

BIN
assets/windows/tiger.ico Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 17 KiB

1
build_version.txt Normal file
View file

@ -0,0 +1 @@
1.34.4

1
cmd/telegraf/README.md Symbolic link
View file

@ -0,0 +1 @@
../../docs/COMMANDS_AND_FLAGS.md

104
cmd/telegraf/agent.conf Normal file
View file

@ -0,0 +1,104 @@
# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "10s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
## Telegraf will send metrics to outputs in batches of at most
## metric_batch_size metrics.
## This controls the size of writes that Telegraf sends to output plugins.
metric_batch_size = 1000
## Maximum number of unwritten metrics per output. Increasing this value
## allows for longer periods of output downtime without dropping metrics at the
## cost of higher maximum memory usage.
metric_buffer_limit = 10000
## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"
## Collection offset is used to shift the collection by the given amount.
  ## This can be used to avoid many plugins querying constrained devices
  ## at the same time by manually scheduling them in time.
# collection_offset = "0s"
## Default flushing interval for all outputs. Maximum flush_interval will be
## flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## Collected metrics are rounded to the precision specified. Precision is
## specified as an interval with an integer + unit (e.g. 0s, 10ms, 2us, 4s).
## Valid time units are "ns", "us" (or "µs"), "ms", "s".
##
## By default or when set to "0s", precision will be set to the same
## timestamp order as the collection interval, with the maximum being 1s:
## ie, when interval = "10s", precision will be "1s"
## when interval = "250ms", precision will be "1ms"
##
## Precision will NOT be used for service inputs. It is up to each individual
## service input to set the timestamp at the appropriate precision.
precision = "0s"
## Log at debug level.
# debug = false
## Log only error level messages.
# quiet = false
## Log format controls the way messages are logged and can be one of "text",
## "structured" or, on Windows, "eventlog".
# logformat = "text"
## Message key for structured logs, to override the default of "msg".
## Ignored if `logformat` is not "structured".
# structured_log_message_key = "message"
## Name of the file to be logged to or stderr if unset or empty. This
## setting is ignored for the "eventlog" format.
# logfile = ""
## The logfile will be rotated after the time interval specified. When set
## to 0 no time based rotation is performed. Logs are rotated only when
## written to, if there is no log activity rotation may be delayed.
# logfile_rotation_interval = "0h"
## The logfile will be rotated when it becomes larger than the specified
## size. When set to 0 no size based rotation is performed.
# logfile_rotation_max_size = "0MB"
## Maximum number of rotated archives to keep, any older logs are deleted.
## If set to -1, no archives are removed.
# logfile_rotation_max_archives = 5
## Pick a timezone to use when logging or type 'local' for local time.
## Example: America/Chicago
# log_with_timezone = ""
## Override default hostname, if empty use os.Hostname()
# hostname = ""
  ## If set to true, do not set the "host" tag in the telegraf agent.
# omit_hostname = false
## Method of translating SNMP objects. Can be "netsnmp" (deprecated) which
## translates by calling external programs snmptranslate and snmptable,
## or "gosmi" which translates using the built-in gosmi library.
# snmp_translator = "netsnmp"
## Name of the file to load the state of plugins from and store the state to.
## If uncommented and not empty, this file will be used to save the state of
## stateful plugins on termination of Telegraf. If the file exists on start,
## the state in the file will be restored for the plugins.
# statefile = ""
## Flag to skip running processors after aggregators
## By default, processors are run a second time after aggregators. Changing
## this setting to true will skip the second run of processors.
# skip_processors_after_aggregators = false

240
cmd/telegraf/cmd_config.go Normal file
View file

@ -0,0 +1,240 @@
// Command handling for configuration "config" command
package main
import (
"errors"
"fmt"
"io"
"log"
"net/url"
"os"
"path/filepath"
"github.com/fatih/color"
"github.com/urfave/cli/v2"
"github.com/influxdata/telegraf/agent"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/logger"
"github.com/influxdata/telegraf/migrations"
)
// getConfigCommands returns the "config" command tree used for printing a
// sample configuration as well as for checking and migrating existing
// configuration files. The sample configuration is written to outputBuffer;
// configHandlingFlags carries the shared --config/--config-directory and
// filter flags.
func getConfigCommands(configHandlingFlags []cli.Flag, outputBuffer io.Writer) []*cli.Command {
	return []*cli.Command{
		{
			Name:  "config",
			Usage: "commands for generating and migrating configurations",
			Flags: configHandlingFlags,
			Action: func(cCtx *cli.Context) error {
				// The sub_Filters are populated when the filter flags are set after the subcommand config
				// e.g. telegraf config --section-filter inputs
				filters := processFilterFlags(cCtx)
				printSampleConfig(outputBuffer, filters)
				return nil
			},
			Subcommands: []*cli.Command{
				{
					Name:  "check",
					Usage: "check configuration file(s) for issues",
					Description: `
The 'check' command reads the configuration files specified via '--config' or
'--config-directory' and tries to initialize, but not start, the plugins.
Syntax and semantic errors detectable without starting the plugins will
be reported.
If no configuration file is explicitly specified the command reads the
default locations and uses those configuration files.
To check the file 'mysettings.conf' use
> telegraf config check --config mysettings.conf
`,
					Flags: configHandlingFlags,
					Action: func(cCtx *cli.Context) error {
						// Setup logging
						logConfig := &logger.Config{Debug: cCtx.Bool("debug")}
						if err := logger.SetupLogging(logConfig); err != nil {
							return err
						}

						// Collect the given configuration files
						configFiles := cCtx.StringSlice("config")
						configDir := cCtx.StringSlice("config-directory")
						for _, fConfigDirectory := range configDir {
							files, err := config.WalkDirectory(fConfigDirectory)
							if err != nil {
								return err
							}
							configFiles = append(configFiles, files...)
						}

						// If no "config" or "config-directory" flag(s) was
						// provided we should load default configuration files
						if len(configFiles) == 0 {
							paths, err := config.GetDefaultConfigPath()
							if err != nil {
								return err
							}
							configFiles = paths
						}

						// Load the config and try to initialize the plugins
						c := config.NewConfig()
						c.Agent.Quiet = cCtx.Bool("quiet")
						if err := c.LoadAll(configFiles...); err != nil {
							return err
						}
						ag := agent.NewAgent(c)

						// Set the default for processor skipping
						if c.Agent.SkipProcessorsAfterAggregators == nil {
							msg := `The default value of 'skip_processors_after_aggregators' will change to 'true' with Telegraf v1.40.0! `
							msg += `If you need the current default behavior, please explicitly set the option to 'false'!`
							log.Print("W! [agent] ", color.YellowString(msg))
							skipProcessorsAfterAggregators := false
							c.Agent.SkipProcessorsAfterAggregators = &skipProcessorsAfterAggregators
						}
						return ag.InitPlugins()
					},
				},
				{
					Name:  "create",
					Usage: "create a full sample configuration and show it",
					Description: `
The 'create' command produces a full configuration containing all plugins as an example
and shows it on the console. You may apply 'section' or 'plugin' filtering
to reduce the output to the plugins you need
Create the full configuration
> telegraf config create
To produce a configuration only containing a Modbus input plugin and an
InfluxDB v2 output plugin use
> telegraf config create --section-filter "inputs:outputs" --input-filter "modbus" --output-filter "influxdb_v2"
`,
					Flags: configHandlingFlags,
					Action: func(cCtx *cli.Context) error {
						filters := processFilterFlags(cCtx)
						printSampleConfig(outputBuffer, filters)
						return nil
					},
				},
				{
					Name:  "migrate",
					Usage: "migrate deprecated plugins and options of the configuration(s)",
					Description: `
The 'migrate' command reads the configuration files specified via '--config' or
'--config-directory' and tries to migrate plugins or options that are currently
deprecated using the recommended replacements. If no configuration file is
explicitly specified the command reads the default locations and uses those
configuration files. Migrated files are stored with a '.migrated' suffix at the
location of the inputs. If you are migrating remote configurations the migrated
configuration is stored in the current directory using the filename of the URL
with a '.migrated' suffix.
It is highly recommended to test those migrated configurations before using
those files unattended!
To migrate the file 'mysettings.conf' use
> telegraf config migrate --config mysettings.conf
`,
					Flags: []cli.Flag{
						&cli.BoolFlag{
							Name:  "force",
							Usage: "forces overwriting of an existing migration file",
						},
					},
					Action: func(cCtx *cli.Context) error {
						// Setup logging
						logConfig := &logger.Config{Debug: cCtx.Bool("debug")}
						if err := logger.SetupLogging(logConfig); err != nil {
							return err
						}

						// Check if we have migrations at all. There might be
						// none if you run a custom build without migrations
						// enabled.
						if len(migrations.PluginMigrations) == 0 {
							return errors.New("no migrations available")
						}
						log.Printf("%d plugin migration(s) available", len(migrations.PluginMigrations))

						// Collect the given configuration files
						configFiles := cCtx.StringSlice("config")
						configDir := cCtx.StringSlice("config-directory")
						for _, fConfigDirectory := range configDir {
							files, err := config.WalkDirectory(fConfigDirectory)
							if err != nil {
								return err
							}
							configFiles = append(configFiles, files...)
						}

						// If no "config" or "config-directory" flag(s) was
						// provided we should load default configuration files
						if len(configFiles) == 0 {
							paths, err := config.GetDefaultConfigPath()
							if err != nil {
								return err
							}
							configFiles = paths
						}

						for _, fn := range configFiles {
							log.Printf("D! Trying to migrate %q...", fn)

							// Read and parse the config file
							data, remote, err := config.LoadConfigFile(fn)
							if err != nil {
								return fmt.Errorf("opening input %q failed: %w", fn, err)
							}

							out, applied, err := config.ApplyMigrations(data)
							if err != nil {
								return err
							}

							// Do not write a migration file if nothing was done
							if applied == 0 {
								log.Printf("I! No migration applied for %q", fn)
								continue
							}

							// Construct the output filename
							// For remote locations we just save the filename
							// with the migrated suffix.
							outfn := fn + ".migrated"
							if remote {
								u, err := url.Parse(fn)
								if err != nil {
									return fmt.Errorf("parsing remote config URL %q failed: %w", fn, err)
								}
								outfn = filepath.Base(u.Path) + ".migrated"
							}
							log.Printf("I! %d migration applied for %q, writing result as %q", applied, fn, outfn)

							// Make sure the file does not exist yet if we should not overwrite
							if !cCtx.Bool("force") {
								if _, err := os.Stat(outfn); !errors.Is(err, os.ErrNotExist) {
									return fmt.Errorf("output file %q already exists", outfn)
								}
							}

							// Write the output file
							if err := os.WriteFile(outfn, out, 0640); err != nil {
								return fmt.Errorf("writing output %q failed: %w", outfn, err)
							}
						}
						return nil
					},
				},
			},
		},
	}
}

192
cmd/telegraf/cmd_plugins.go Normal file
View file

@ -0,0 +1,192 @@
// Command handling for configuration "plugins" command
package main
import (
"fmt"
"io"
"sort"
"strings"
"github.com/urfave/cli/v2"
"github.com/influxdata/telegraf/plugins/aggregators"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf/plugins/parsers"
"github.com/influxdata/telegraf/plugins/processors"
"github.com/influxdata/telegraf/plugins/secretstores"
"github.com/influxdata/telegraf/plugins/serializers"
)
// pluginNames renders the keys of m as "<prefix>.<key>\n" lines, sorted
// alphabetically, ready to be written to an output buffer.
func pluginNames[M ~map[string]V, V any](m M, prefix string) []byte {
	keys := make([]string, 0, len(m))
	for name := range m {
		keys = append(keys, name)
	}
	sort.Strings(keys)

	var out strings.Builder
	for _, name := range keys {
		fmt.Fprintf(&out, "%s.%s\n", prefix, name)
	}
	return []byte(out.String())
}
// getPluginCommands returns the "plugins" command tree which lists the
// registered (or, with --deprecated, the deprecated) plugins of every
// category. The top-level command prints all categories; each subcommand
// restricts the listing to a single category. Output goes to outputBuffer.
func getPluginCommands(outputBuffer io.Writer) []*cli.Command {
	return []*cli.Command{
		{
			Name:  "plugins",
			Usage: "commands for printing available plugins",
			Flags: []cli.Flag{
				&cli.BoolFlag{
					Name:  "deprecated",
					Usage: "print only deprecated plugins",
				},
			},
			Action: func(cCtx *cli.Context) error {
				if cCtx.Bool("deprecated") {
					outputBuffer.Write(pluginNames(inputs.Deprecations, "inputs"))
					outputBuffer.Write(pluginNames(outputs.Deprecations, "outputs"))
					outputBuffer.Write(pluginNames(processors.Deprecations, "processors"))
					outputBuffer.Write(pluginNames(aggregators.Deprecations, "aggregators"))
					outputBuffer.Write(pluginNames(secretstores.Deprecations, "secretstores"))
					outputBuffer.Write(pluginNames(parsers.Deprecations, "parsers"))
					outputBuffer.Write(pluginNames(serializers.Deprecations, "serializers"))
				} else {
					outputBuffer.Write(pluginNames(inputs.Inputs, "inputs"))
					outputBuffer.Write(pluginNames(outputs.Outputs, "outputs"))
					outputBuffer.Write(pluginNames(processors.Processors, "processors"))
					outputBuffer.Write(pluginNames(aggregators.Aggregators, "aggregators"))
					outputBuffer.Write(pluginNames(secretstores.SecretStores, "secretstores"))
					outputBuffer.Write(pluginNames(parsers.Parsers, "parsers"))
					outputBuffer.Write(pluginNames(serializers.Serializers, "serializers"))
				}
				return nil
			},
			// One subcommand per plugin category, each following the same
			// flag/action pattern as the parent command.
			Subcommands: []*cli.Command{
				{
					Name:  "inputs",
					Usage: "Print available input plugins",
					Flags: []cli.Flag{
						&cli.BoolFlag{
							Name:  "deprecated",
							Usage: "print only deprecated plugins",
						},
					},
					Action: func(cCtx *cli.Context) error {
						if cCtx.Bool("deprecated") {
							outputBuffer.Write(pluginNames(inputs.Deprecations, "inputs"))
						} else {
							outputBuffer.Write(pluginNames(inputs.Inputs, "inputs"))
						}
						return nil
					},
				},
				{
					Name:  "outputs",
					Usage: "Print available output plugins",
					Flags: []cli.Flag{
						&cli.BoolFlag{
							Name:  "deprecated",
							Usage: "print only deprecated plugins",
						},
					},
					Action: func(cCtx *cli.Context) error {
						if cCtx.Bool("deprecated") {
							outputBuffer.Write(pluginNames(outputs.Deprecations, "outputs"))
						} else {
							outputBuffer.Write(pluginNames(outputs.Outputs, "outputs"))
						}
						return nil
					},
				},
				{
					Name:  "processors",
					Usage: "Print available processor plugins",
					Flags: []cli.Flag{
						&cli.BoolFlag{
							Name:  "deprecated",
							Usage: "print only deprecated plugins",
						},
					},
					Action: func(cCtx *cli.Context) error {
						if cCtx.Bool("deprecated") {
							outputBuffer.Write(pluginNames(processors.Deprecations, "processors"))
						} else {
							outputBuffer.Write(pluginNames(processors.Processors, "processors"))
						}
						return nil
					},
				},
				{
					Name:  "aggregators",
					Usage: "Print available aggregator plugins",
					Flags: []cli.Flag{
						&cli.BoolFlag{
							Name:  "deprecated",
							Usage: "print only deprecated plugins",
						},
					},
					Action: func(cCtx *cli.Context) error {
						if cCtx.Bool("deprecated") {
							outputBuffer.Write(pluginNames(aggregators.Deprecations, "aggregators"))
						} else {
							outputBuffer.Write(pluginNames(aggregators.Aggregators, "aggregators"))
						}
						return nil
					},
				},
				{
					Name:  "secretstores",
					Usage: "Print available secretstore plugins",
					Flags: []cli.Flag{
						&cli.BoolFlag{
							Name:  "deprecated",
							Usage: "print only deprecated plugins",
						},
					},
					Action: func(cCtx *cli.Context) error {
						if cCtx.Bool("deprecated") {
							outputBuffer.Write(pluginNames(secretstores.Deprecations, "secretstores"))
						} else {
							outputBuffer.Write(pluginNames(secretstores.SecretStores, "secretstores"))
						}
						return nil
					},
				},
				{
					Name:  "parsers",
					Usage: "Print available parser plugins",
					Flags: []cli.Flag{
						&cli.BoolFlag{
							Name:  "deprecated",
							Usage: "print only deprecated plugins",
						},
					},
					Action: func(cCtx *cli.Context) error {
						if cCtx.Bool("deprecated") {
							outputBuffer.Write(pluginNames(parsers.Deprecations, "parsers"))
						} else {
							outputBuffer.Write(pluginNames(parsers.Parsers, "parsers"))
						}
						return nil
					},
				},
				{
					Name:  "serializers",
					Usage: "Print available serializer plugins",
					Flags: []cli.Flag{
						&cli.BoolFlag{
							Name:  "deprecated",
							Usage: "print only deprecated plugins",
						},
					},
					Action: func(cCtx *cli.Context) error {
						if cCtx.Bool("deprecated") {
							outputBuffer.Write(pluginNames(serializers.Deprecations, "serializers"))
						} else {
							outputBuffer.Write(pluginNames(serializers.Serializers, "serializers"))
						}
						return nil
					},
				},
			},
		},
	}
}

View file

@ -0,0 +1,261 @@
// Command handling for secret-stores' "secrets" command
package main
import (
"errors"
"fmt"
"os"
"sort"
"strings"
"github.com/awnumar/memguard"
"github.com/urfave/cli/v2"
"golang.org/x/term"
)
// processFilterOnlySecretStoreFlags builds a Filters value that loads ONLY
// the secret-store plugins: all other plugin categories are disabled via the
// "-" filter while the secret-store filter is taken from the CLI flags
// (both from the parent context and the subcommand context).
func processFilterOnlySecretStoreFlags(ctx *cli.Context) Filters {
	sectionFilters := []string{"inputs", "outputs", "processors", "aggregators"}
	// "-" matches no plugin name, effectively disabling the category.
	inputFilters := []string{"-"}
	outputFilters := []string{"-"}
	processorFilters := []string{"-"}
	aggregatorFilters := []string{"-"}

	// Only load the secret-stores
	var secretstore string
	if len(ctx.Lineage()) >= 2 {
		parent := ctx.Lineage()[1] // ancestor contexts in order from child to parent
		secretstore = parent.String("secretstore-filter")
	}

	// If both the parent and command filters are defined, append them together
	secretstore = appendFilter(secretstore, ctx.String("secretstore-filter"))
	secretstoreFilters := deleteEmpty(strings.Split(secretstore, ":"))

	return Filters{sectionFilters, inputFilters, outputFilters, aggregatorFilters, processorFilters, secretstoreFilters}
}
// getSecretStoreCommands returns the "secrets" command tree for listing,
// retrieving and setting secrets in the configured secret-stores. Each
// subcommand loads only the secret-store plugins from the configuration
// before acting on the given store/key arguments.
func getSecretStoreCommands(m App) []*cli.Command {
	return []*cli.Command{
		{
			Name:  "secrets",
			Usage: "commands for listing, adding and removing secrets on all known secret-stores",
			Subcommands: []*cli.Command{
				{
					Name:  "list",
					Usage: "list known secrets and secret-stores",
					Description: `
The 'list' command requires passing in your configuration file
containing the secret-store definitions you want to access. To get a
list of available secret-store plugins, please have a look at
https://github.com/influxdata/telegraf/tree/master/plugins/secretstores.
For help on how to define secret-stores, check the documentation of the
different plugins.
Assuming you use the default configuration file location, you can run
the following command to list the keys of all known secrets in ALL
available stores
> telegraf secrets list
To get the keys of all known secrets in a particular store, you can run
> telegraf secrets list mystore
To also reveal the actual secret, i.e. the value, you can pass the
'--reveal-secret' flag.
`,
					ArgsUsage: "[secret-store ID]...[secret-store ID]",
					Flags: []cli.Flag{
						&cli.BoolFlag{
							Name:  "reveal-secret",
							Usage: "also print the secret value",
						},
					},
					Action: func(cCtx *cli.Context) error {
						// Only load the secret-stores
						filters := processFilterOnlySecretStoreFlags(cCtx)
						g := GlobalFlags{
							config:     cCtx.StringSlice("config"),
							configDir:  cCtx.StringSlice("config-directory"),
							plugindDir: cCtx.String("plugin-directory"),
							password:   cCtx.String("password"),
							debug:      cCtx.Bool("debug"),
						}
						w := WindowFlags{}
						m.Init(nil, filters, g, w)

						// Positional args select the stores; without any,
						// all known stores are listed.
						args := cCtx.Args()
						var storeIDs []string
						if args.Present() {
							storeIDs = args.Slice()
						} else {
							ids, err := m.ListSecretStores()
							if err != nil {
								return fmt.Errorf("unable to determine secret-store IDs: %w", err)
							}
							storeIDs = ids
						}
						sort.Strings(storeIDs)

						reveal := cCtx.Bool("reveal-secret")
						for _, storeID := range storeIDs {
							store, err := m.GetSecretStore(storeID)
							if err != nil {
								return fmt.Errorf("unable to get secret-store %q: %w", storeID, err)
							}
							keys, err := store.List()
							if err != nil {
								return fmt.Errorf("unable to get secrets from store %q: %w", storeID, err)
							}
							sort.Strings(keys)

							fmt.Printf("Known secrets for store %q:\n", storeID)
							for _, k := range keys {
								var v []byte
								if reveal {
									if v, err = store.Get(k); err != nil {
										return fmt.Errorf("unable to get value of secret %q from store %q: %w", k, storeID, err)
									}
								}
								fmt.Printf("    %-30s  %s\n", k, string(v))
								// Remove the plain-text secret from memory as
								// soon as it has been printed.
								memguard.WipeBytes(v)
							}
						}
						return nil
					},
				},
				{
					Name:  "get",
					Usage: "retrieves value of given secret from given store",
					Description: `
The 'get' command requires passing in your configuration file
containing the secret-store definitions you want to access. To get a
list of available secret-store plugins, please have a look at
https://github.com/influxdata/telegraf/tree/master/plugins/secretstores.
and use the 'secrets list' command to get the IDs of available stores and
key(s) of available secrets.
For help on how to define secret-stores, check the documentation of the
different plugins.
Assuming you use the default configuration file location, you can run
the following command to retrieve a secret from a secret store
available stores
> telegraf secrets get mystore mysecretkey
This will fetch the secret with the key 'mysecretkey' from the secret-store
with the ID 'mystore'.
`,
					ArgsUsage: "<secret-store ID> <secret key>",
					Action: func(cCtx *cli.Context) error {
						// Only load the secret-stores
						filters := processFilterOnlySecretStoreFlags(cCtx)
						g := GlobalFlags{
							config:     cCtx.StringSlice("config"),
							configDir:  cCtx.StringSlice("config-directory"),
							plugindDir: cCtx.String("plugin-directory"),
							password:   cCtx.String("password"),
							debug:      cCtx.Bool("debug"),
						}
						w := WindowFlags{}
						m.Init(nil, filters, g, w)

						args := cCtx.Args()
						if !args.Present() || args.Len() != 2 {
							return errors.New("invalid number of arguments")
						}

						storeID := args.First()
						key := args.Get(1)

						store, err := m.GetSecretStore(storeID)
						if err != nil {
							return fmt.Errorf("unable to get secret-store: %w", err)
						}
						value, err := store.Get(key)
						if err != nil {
							return fmt.Errorf("unable to get secret: %w", err)
						}
						fmt.Printf("%s:%s = %s\n", storeID, key, value)
						return nil
					},
				},
				{
					Name:  "set",
					Usage: "create or modify a secret in the given store",
					Description: `
The 'set' command requires passing in your configuration file
containing the secret-store definitions you want to access. To get a
list of available secret-store plugins, please have a look at
https://github.com/influxdata/telegraf/tree/master/plugins/secretstores.
and use the 'secrets list' command to get the IDs of available stores and keys.
For help on how to define secret-stores, check the documentation of the
different plugins.
Assuming you use the default configuration file location, you can run
the following command to create a secret in an available secret-store
> telegraf secrets set mystore mysecretkey mysecretvalue
This will create a secret with the key 'mysecretkey' in the secret-store
with the ID 'mystore' with the value being set to 'mysecretvalue'. If a
secret with that key ('mysecretkey') already existed in that store, its
value will be modified.
When you leave out the value of the secret like
> telegraf secrets set mystore mysecretkey
you will be prompted to enter the value of the secret.
`,
					ArgsUsage: "<secret-store ID> <secret key>",
					Action: func(cCtx *cli.Context) error {
						// Only load the secret-stores
						filters := processFilterOnlySecretStoreFlags(cCtx)
						g := GlobalFlags{
							config:     cCtx.StringSlice("config"),
							configDir:  cCtx.StringSlice("config-directory"),
							plugindDir: cCtx.String("plugin-directory"),
							password:   cCtx.String("password"),
							debug:      cCtx.Bool("debug"),
						}
						w := WindowFlags{}
						m.Init(nil, filters, g, w)

						args := cCtx.Args()
						if !args.Present() || args.Len() < 2 {
							return errors.New("invalid number of arguments")
						}

						storeID := args.First()
						key := args.Get(1)

						// Prompt for the value if it was not given on the
						// command line; the input is not echoed.
						value := args.Get(2)
						if value == "" {
							fmt.Printf("Enter secret value: ")
							b, err := term.ReadPassword(int(os.Stdin.Fd()))
							if err != nil {
								return err
							}
							fmt.Println()
							value = string(b)
						}

						store, err := m.GetSecretStore(storeID)
						if err != nil {
							return fmt.Errorf("unable to get secret-store: %w", err)
						}
						if err := store.Set(key, value); err != nil {
							return fmt.Errorf("unable to set secret: %w", err)
						}
						return nil
					},
				},
			},
		},
	}
}

View file

@ -0,0 +1,203 @@
//go:build windows
// Command handling for configuration "service" command
package main
import (
"errors"
"fmt"
"io"
"github.com/urfave/cli/v2"
"golang.org/x/sys/windows"
)
// cliFlags returns the Windows-specific top-level flags controlling the
// deprecated inline service handling (--service ...) and console mode.
func cliFlags() []cli.Flag {
	return []cli.Flag{
		&cli.StringFlag{
			Name:  "service",
			Usage: "operate on the service (windows only)",
		},
		&cli.StringFlag{
			Name:  "service-name",
			Value: "telegraf",
			Usage: "service name (windows only)",
		},
		&cli.StringFlag{
			Name:  "service-display-name",
			Value: "Telegraf Data Collector Service",
			Usage: "service display name (windows only)",
		},
		&cli.StringFlag{
			Name:  "service-restart-delay",
			Value: "5m",
			// Was the only flag without a usage text; documented for
			// consistency with the sibling flags.
			Usage: "duration for delaying the service restart on failure (windows only)",
		},
		&cli.BoolFlag{
			Name:  "service-auto-restart",
			Usage: "auto restart service on failure (windows only)",
		},
		&cli.BoolFlag{
			Name:  "console",
			Usage: "run as console application (windows only)",
		},
	}
}
// getServiceCommands returns the "service" command tree for installing,
// removing and controlling Telegraf as a Windows service. Human-readable
// status messages are written to outputBuffer.
func getServiceCommands(outputBuffer io.Writer) []*cli.Command {
	return []*cli.Command{
		{
			Name:  "service",
			Usage: "commands for operating on the Windows service",
			Flags: nil,
			Subcommands: []*cli.Command{
				{
					Name:  "install",
					Usage: "install Telegraf as a Windows service",
					Description: `
The 'install' command will create a Windows service for automatically starting
Telegraf with the specified configuration and service parameters. If no
configuration(s) is specified the service will use the file in
"C:\Program Files\Telegraf\telegraf.conf".
To install Telegraf as a service use
> telegraf service install
In case you are planning to start multiple Telegraf instances as a service,
you must use distinctive service-names for each instance. To install two
services with different configurations use
> telegraf --config "C:\Program Files\Telegraf\telegraf-machine.conf" --service-name telegraf-machine service install
> telegraf --config "C:\Program Files\Telegraf\telegraf-service.conf" --service-name telegraf-service service install
`,
					Flags: []cli.Flag{
						&cli.StringFlag{
							Name:  "display-name",
							Value: "Telegraf Data Collector Service",
							Usage: "service name as displayed in the service manager",
						},
						&cli.StringFlag{
							Name:  "restart-delay",
							Value: "5m",
							Usage: "duration for delaying the service restart on failure",
						},
						&cli.BoolFlag{
							Name:  "auto-restart",
							Usage: "enable automatic service restart on failure",
						},
					},
					Action: func(cCtx *cli.Context) error {
						cfg := &serviceConfig{
							displayName:  cCtx.String("display-name"),
							restartDelay: cCtx.String("restart-delay"),
							autoRestart:  cCtx.Bool("auto-restart"),
							configs:      cCtx.StringSlice("config"),
							configDirs:   cCtx.StringSlice("config-directory"),
						}
						name := cCtx.String("service-name")
						if err := installService(name, cfg); err != nil {
							return err
						}
						fmt.Fprintf(outputBuffer, "Successfully installed service %q\n", name)
						return nil
					},
				},
				{
					Name:  "uninstall",
					Usage: "remove the Telegraf Windows service",
					Description: `
The 'uninstall' command removes the Telegraf service with the given name. To
remove a service use
> telegraf service uninstall
In case you specified a custom service-name during install use
> telegraf --service-name telegraf-machine service uninstall
`,
					Action: func(cCtx *cli.Context) error {
						name := cCtx.String("service-name")
						if err := uninstallService(name); err != nil {
							return err
						}
						fmt.Fprintf(outputBuffer, "Successfully uninstalled service %q\n", name)
						return nil
					},
				},
				{
					Name:  "start",
					Usage: "start the Telegraf Windows service",
					Description: `
The 'start' command triggers the start of the Windows service with the given
name. To start the service either use the Windows service manager or run
> telegraf service start
In case you specified a custom service-name during install use
> telegraf --service-name telegraf-machine service start
`,
					Action: func(cCtx *cli.Context) error {
						name := cCtx.String("service-name")
						if err := startService(name); err != nil {
							return err
						}
						fmt.Fprintf(outputBuffer, "Successfully started service %q\n", name)
						return nil
					},
				},
				{
					Name:  "stop",
					Usage: "stop the Telegraf Windows service",
					Description: `
The 'stop' command triggers the stop of the Windows service with the given
name and will wait until the service is actually stopped. To stop the service
either use the Windows service manager or run
> telegraf service stop
In case you specified a custom service-name during install use
> telegraf --service-name telegraf-machine service stop
`,
					Action: func(cCtx *cli.Context) error {
						name := cCtx.String("service-name")
						if err := stopService(name); err != nil {
							// Stopping an already-stopped service is not an
							// error for the user.
							if errors.Is(err, windows.ERROR_SERVICE_NOT_ACTIVE) {
								fmt.Fprintf(outputBuffer, "Service %q not started\n", name)
								return nil
							}
							return err
						}
						fmt.Fprintf(outputBuffer, "Successfully stopped service %q\n", name)
						return nil
					},
				},
				{
					Name:  "status",
					Usage: "query the Telegraf Windows service status",
					Description: `
The 'status' command queries the current state of the Windows service with the
given name. To query the service either check the Windows service manager or run
> telegraf service status
In case you specified a custom service-name during install use
> telegraf --service-name telegraf-machine service status
`,
					Action: func(cCtx *cli.Context) error {
						name := cCtx.String("service-name")
						status, err := queryService(name)
						if err != nil {
							return err
						}
						fmt.Fprintf(outputBuffer, "Service %q is in %q state\n", name, status)
						return nil
					},
				},
			},
		},
	}
}

View file

@ -0,0 +1,17 @@
//go:build !windows
package main
import (
"io"
"github.com/urfave/cli/v2"
)
// cliFlags returns no platform-specific flags on non-Windows systems; the
// service-related flags only exist in the Windows build.
func cliFlags() []cli.Flag {
	return []cli.Flag{}
}
// getServiceCommands returns no commands on non-Windows platforms; the
// "service" command group is only available in the Windows build.
func getServiceCommands(io.Writer) []*cli.Command {
	return nil
}

417
cmd/telegraf/main.go Normal file
View file

@ -0,0 +1,417 @@
package main
import (
"fmt"
"io"
"log"
"os"
"sort"
"strings"
"github.com/awnumar/memguard"
"github.com/urfave/cli/v2"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/goplugin"
"github.com/influxdata/telegraf/logger"
_ "github.com/influxdata/telegraf/plugins/aggregators/all"
"github.com/influxdata/telegraf/plugins/inputs"
_ "github.com/influxdata/telegraf/plugins/inputs/all"
"github.com/influxdata/telegraf/plugins/outputs"
_ "github.com/influxdata/telegraf/plugins/outputs/all"
_ "github.com/influxdata/telegraf/plugins/parsers/all"
_ "github.com/influxdata/telegraf/plugins/processors/all"
_ "github.com/influxdata/telegraf/plugins/secretstores/all"
_ "github.com/influxdata/telegraf/plugins/serializers/all"
)
// TelegrafConfig is the subset of the configuration interface used for
// collecting and printing deprecation information about configured plugins.
type TelegrafConfig interface {
	CollectDeprecationInfos([]string, []string, []string, []string) map[string][]config.PluginDeprecationInfo
	PrintDeprecationList([]config.PluginDeprecationInfo)
}
// Filters holds the ':'-separated filter selections from the CLI flags,
// restricting which configuration sections and plugin types are loaded
// or printed.
type Filters struct {
	section     []string // configuration sections to include (e.g. "inputs")
	input       []string // input plugin names to enable
	output      []string // output plugin names to enable
	aggregator  []string // aggregator plugin names to enable
	processor   []string // processor plugin names to enable
	secretstore []string // secret-store plugin names to enable
}
// appendFilter joins two filter strings with a ':' separator; when either
// side is empty the other is returned unchanged.
func appendFilter(a, b string) string {
	switch {
	case a == "":
		return b
	case b == "":
		return a
	default:
		return a + ":" + b
	}
}
// processFilterFlags collects the section and per-category plugin filter
// flags from both the parent and the subcommand context, joins them with
// ':' and splits them into the Filters structure.
func processFilterFlags(ctx *cli.Context) Filters {
	var section, input, output, aggregator, processor, secretstore string

	// Support defining filters before and after the command
	// The old style was:
	// ./telegraf --section-filter inputs --input-filter cpu config >test.conf
	// The new style is:
	// ./telegraf config --section-filter inputs --input-filter cpu >test.conf
	// To support the old style, check if the parent context has the filter flags defined
	if len(ctx.Lineage()) >= 2 {
		parent := ctx.Lineage()[1] // ancestor contexts in order from child to parent
		section = parent.String("section-filter")
		input = parent.String("input-filter")
		output = parent.String("output-filter")
		aggregator = parent.String("aggregator-filter")
		processor = parent.String("processor-filter")
		secretstore = parent.String("secretstore-filter")
	}

	// If both the parent and command filters are defined, append them together
	section = appendFilter(section, ctx.String("section-filter"))
	input = appendFilter(input, ctx.String("input-filter"))
	output = appendFilter(output, ctx.String("output-filter"))
	aggregator = appendFilter(aggregator, ctx.String("aggregator-filter"))
	processor = appendFilter(processor, ctx.String("processor-filter"))
	secretstore = appendFilter(secretstore, ctx.String("secretstore-filter"))

	// Split the joined filter strings at ':' and drop empty entries.
	sectionFilters := deleteEmpty(strings.Split(section, ":"))
	inputFilters := deleteEmpty(strings.Split(input, ":"))
	outputFilters := deleteEmpty(strings.Split(output, ":"))
	aggregatorFilters := deleteEmpty(strings.Split(aggregator, ":"))
	processorFilters := deleteEmpty(strings.Split(processor, ":"))
	secretstoreFilters := deleteEmpty(strings.Split(secretstore, ":"))
	return Filters{sectionFilters, inputFilters, outputFilters, aggregatorFilters, processorFilters, secretstoreFilters}
}
// deleteEmpty returns a copy of s with all empty strings removed.
// A nil slice is returned when no non-empty entries exist.
func deleteEmpty(s []string) []string {
	var kept []string
	for _, item := range s {
		if item == "" {
			continue
		}
		kept = append(kept, item)
	}
	return kept
}
// runApp defines all the subcommands and flags for Telegraf.
// This abstraction is used for testing, so outputBuffer and args can be
// changed: tests inject mock Server/TelegrafConfig/App implementations and
// capture output in a buffer instead of stdout.
func runApp(args []string, outputBuffer io.Writer, pprof Server, c TelegrafConfig, m App) error {
	// Flags shared between the top-level invocation and the `config`
	// subcommand (see processFilterFlags for how both locations combine).
	configHandlingFlags := []cli.Flag{
		&cli.StringSliceFlag{
			Name: "config",
			Usage: "configuration file to load",
		},
		&cli.StringSliceFlag{
			Name: "config-directory",
			Usage: "directory containing additional *.conf files",
		},
		&cli.StringFlag{
			Name: "section-filter",
			Usage: "filter the sections to print, separator is ':'. " +
				"Valid values are 'agent', 'global_tags', 'outputs', 'processors', 'aggregators' and 'inputs'",
		},
		&cli.StringFlag{
			Name: "input-filter",
			Usage: "filter the inputs to enable, separator is ':'",
		},
		&cli.StringFlag{
			Name: "output-filter",
			Usage: "filter the outputs to enable, separator is ':'",
		},
		&cli.StringFlag{
			Name: "aggregator-filter",
			Usage: "filter the aggregators to enable, separator is ':'",
		},
		&cli.StringFlag{
			Name: "processor-filter",
			Usage: "filter the processors to enable, separator is ':'",
		},
		&cli.StringFlag{
			Name: "secretstore-filter",
			Usage: "filter the secret-stores to enable, separator is ':'",
		},
	}
	mainFlags := append(configHandlingFlags, cliFlags()...)
	// This function is used when Telegraf is run with only flags
	action := func(cCtx *cli.Context) error {
		// We do not expect any arguments this is likely a misspelling of
		// a command...
		if cCtx.NArg() > 0 {
			return fmt.Errorf("unknown command %q", cCtx.Args().First())
		}
		// Deprecated: Use execd instead
		// Load external plugins, if requested.
		if cCtx.String("plugin-directory") != "" {
			log.Printf("I! Loading external plugins from: %s", cCtx.String("plugin-directory"))
			if err := goplugin.LoadExternalPlugins(cCtx.String("plugin-directory")); err != nil {
				return err
			}
		}
		// switch for flags which just do something and exit immediately
		switch {
		// print deprecated plugins or plugin options
		case cCtx.Bool("deprecation-list"):
			filters := processFilterFlags(cCtx)
			infos := c.CollectDeprecationInfos(
				filters.input, filters.output, filters.aggregator, filters.processor,
			)
			outputBuffer.Write([]byte("Deprecated Input Plugins:\n"))
			c.PrintDeprecationList(infos["inputs"])
			outputBuffer.Write([]byte("Deprecated Output Plugins:\n"))
			c.PrintDeprecationList(infos["outputs"])
			outputBuffer.Write([]byte("Deprecated Processor Plugins:\n"))
			c.PrintDeprecationList(infos["processors"])
			outputBuffer.Write([]byte("Deprecated Aggregator Plugins:\n"))
			c.PrintDeprecationList(infos["aggregators"])
			return nil
		// print available output plugins
		case cCtx.Bool("output-list"):
			outputBuffer.Write([]byte("DEPRECATED: use telegraf plugins outputs\n"))
			outputBuffer.Write([]byte("Available Output Plugins:\n"))
			names := make([]string, 0, len(outputs.Outputs))
			for k := range outputs.Outputs {
				names = append(names, k)
			}
			sort.Strings(names)
			for _, k := range names {
				fmt.Fprintf(outputBuffer, " %s\n", k)
			}
			return nil
		// print available input plugins
		case cCtx.Bool("input-list"):
			outputBuffer.Write([]byte("DEPRECATED: use telegraf plugins inputs\n"))
			outputBuffer.Write([]byte("Available Input Plugins:\n"))
			names := make([]string, 0, len(inputs.Inputs))
			for k := range inputs.Inputs {
				names = append(names, k)
			}
			sort.Strings(names)
			for _, k := range names {
				fmt.Fprintf(outputBuffer, " %s\n", k)
			}
			return nil
		// print usage for a plugin, ie, 'telegraf --usage mysql'
		case cCtx.String("usage") != "":
			// Try inputs and outputs; only fail if the name matches neither.
			err := PrintInputConfig(cCtx.String("usage"), outputBuffer)
			err2 := PrintOutputConfig(cCtx.String("usage"), outputBuffer)
			if err != nil && err2 != nil {
				return fmt.Errorf("%w and %w", err, err2)
			}
			return nil
		// DEPRECATED
		case cCtx.Bool("version"):
			fmt.Fprintf(outputBuffer, "%s\n", internal.FormatFullVersion())
			return nil
		// DEPRECATED
		case cCtx.Bool("sample-config"):
			filters := processFilterFlags(cCtx)
			printSampleConfig(outputBuffer, filters)
			return nil
		}
		if cCtx.String("pprof-addr") != "" {
			pprof.Start(cCtx.String("pprof-addr"))
		}
		// Normal agent run: collect all flags into the structs passed to
		// the App implementation.
		filters := processFilterFlags(cCtx)
		g := GlobalFlags{
			config: cCtx.StringSlice("config"),
			configDir: cCtx.StringSlice("config-directory"),
			testWait: cCtx.Int("test-wait"),
			configURLRetryAttempts: cCtx.Int("config-url-retry-attempts"),
			configURLWatchInterval: cCtx.Duration("config-url-watch-interval"),
			watchConfig: cCtx.String("watch-config"),
			watchInterval: cCtx.Duration("watch-interval"),
			pidFile: cCtx.String("pidfile"),
			plugindDir: cCtx.String("plugin-directory"),
			password: cCtx.String("password"),
			oldEnvBehavior: cCtx.Bool("old-env-behavior"),
			printPluginConfigSource: cCtx.Bool("print-plugin-config-source"),
			test: cCtx.Bool("test"),
			debug: cCtx.Bool("debug"),
			once: cCtx.Bool("once"),
			quiet: cCtx.Bool("quiet"),
			unprotected: cCtx.Bool("unprotected"),
		}
		// Windows-service specific flags (no-ops on other platforms).
		w := WindowFlags{
			service: cCtx.String("service"),
			serviceName: cCtx.String("service-name"),
			serviceDisplayName: cCtx.String("service-display-name"),
			serviceRestartDelay: cCtx.String("service-restart-delay"),
			serviceAutoRestart: cCtx.Bool("service-auto-restart"),
			console: cCtx.Bool("console"),
		}
		m.Init(pprof.ErrChan(), filters, g, w)
		return m.Run()
	}
	// Assemble subcommands: config, secret-store, plugin and service commands.
	commands := append(
		getConfigCommands(configHandlingFlags, outputBuffer),
		getSecretStoreCommands(m)...,
	)
	commands = append(commands, getPluginCommands(outputBuffer)...)
	commands = append(commands, getServiceCommands(outputBuffer)...)
	app := &cli.App{
		Name: "Telegraf",
		Usage: "The plugin-driven server agent for collecting & reporting metrics.",
		Writer: outputBuffer,
		Flags: append(
			[]cli.Flag{
				// Int flags
				&cli.IntFlag{
					Name: "test-wait",
					Usage: "wait up to this many seconds for service inputs to complete in test mode",
				},
				&cli.IntFlag{
					Name: "config-url-retry-attempts",
					Usage: "Number of attempts to obtain a remote configuration via a URL during startup. " +
						"Set to -1 for unlimited attempts.",
					DefaultText: "3",
				},
				//
				// String flags
				&cli.StringFlag{
					Name: "usage",
					Usage: "print usage for a plugin, ie, 'telegraf --usage mysql'",
				},
				&cli.StringFlag{
					Name: "pprof-addr",
					Usage: "pprof host/IP and port to listen on (e.g. 'localhost:6060')",
				},
				&cli.StringFlag{
					Name: "watch-config",
					Usage: "monitoring config changes [notify, poll] of --config and --config-directory options. " +
						"Notify supports linux, *bsd, and macOS. Poll is required for Windows and checks every 250ms.",
				},
				&cli.StringFlag{
					Name: "pidfile",
					Usage: "file to write our pid to",
				},
				&cli.StringFlag{
					Name: "password",
					Usage: "password to unlock secret-stores",
				},
				//
				// Bool flags
				&cli.BoolFlag{
					Name: "old-env-behavior",
					Usage: "switch back to pre v1.27 environment replacement behavior",
				},
				&cli.BoolFlag{
					Name: "print-plugin-config-source",
					Usage: "print the source for a given plugin",
				},
				&cli.BoolFlag{
					Name: "once",
					Usage: "run one gather and exit",
				},
				&cli.BoolFlag{
					Name: "debug",
					Usage: "turn on debug logging",
				},
				&cli.BoolFlag{
					Name: "quiet",
					Usage: "run in quiet mode",
				},
				&cli.BoolFlag{
					Name: "unprotected",
					Usage: "do not protect secrets in memory",
				},
				&cli.BoolFlag{
					Name: "test",
					Usage: "enable test mode: gather metrics, print them out, and exit. " +
						"Note: Test mode only runs inputs, not processors, aggregators, or outputs",
				},
				//
				// Duration flags
				&cli.DurationFlag{
					Name: "watch-interval",
					Usage: "Time duration to check for updates to config files specified by --config and " +
						"--config-directory options. Use with '--watch-config poll'",
					DefaultText: "disabled",
				},
				&cli.DurationFlag{
					Name: "config-url-watch-interval",
					Usage: "Time duration to check for updates to URL based configuration files",
					DefaultText: "disabled",
				},
				// TODO: Change "deprecation-list, input-list, output-list" flags to become a subcommand "list" that takes
				// "input,output,aggregator,processor, deprecated" as parameters
				&cli.BoolFlag{
					Name: "deprecation-list",
					Usage: "print all deprecated plugins or plugin options",
				},
				&cli.BoolFlag{
					Name: "input-list",
					Usage: "print available input plugins",
				},
				&cli.BoolFlag{
					Name: "output-list",
					Usage: "print available output plugins",
				},
				//
				// !!! The following flags are DEPRECATED !!!
				// Already covered with the subcommand `./telegraf version`
				&cli.BoolFlag{
					Name: "version",
					Usage: "DEPRECATED: display the version and exit",
				},
				// Already covered with the subcommand `./telegraf config`
				&cli.BoolFlag{
					Name: "sample-config",
					Usage: "DEPRECATED: print out full sample configuration",
				},
				// Using execd plugin to add external plugins is preferred (less size impact, easier for end user)
				&cli.StringFlag{
					Name: "plugin-directory",
					Usage: "DEPRECATED: path to directory containing external plugins",
				},
				// !!!
			}, mainFlags...),
		Action: action,
		Commands: append([]*cli.Command{
			{
				Name: "version",
				Usage: "print current version to stdout",
				Action: func(*cli.Context) error {
					fmt.Fprintf(outputBuffer, "%s\n", internal.FormatFullVersion())
					return nil
				},
			},
		}, commands...),
	}
	// Make sure we safely erase secrets
	defer memguard.Purge()
	defer logger.CloseLogging()
	if err := app.Run(args); err != nil {
		log.Printf("E! %s", err)
		return err
	}
	return nil
}
// main is the entry point of the Telegraf binary: it wires the real agent,
// pprof server and configuration together and delegates to runApp.
func main() {
	// #13481: disables gh:99designs/keyring kwallet.go from connecting to dbus
	os.Setenv("DISABLE_KWALLET", "1")
	agent := Telegraf{}
	pprof := NewPprofServer()
	c := config.NewConfig()
	// runApp logs the error itself; main only reflects it in the exit code.
	if err := runApp(os.Args, os.Stdout, pprof, c, &agent); err != nil {
		os.Exit(1)
	}
}

574
cmd/telegraf/main_test.go Normal file
View file

@ -0,0 +1,574 @@
package main
import (
"bytes"
"errors"
"fmt"
"io"
"os"
"strconv"
"strings"
"testing"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/outputs"
)
// secrets is the shared fixture for the mock secret-store tests:
// a map of secret-store ID to the key/value secrets held by that store.
var secrets = map[string]map[string][]byte{
	"yoda": {
		"episode1": []byte("member"),
		"episode2": []byte("member"),
		"episode3": []byte("member"),
	},
	"mace_windu": {
		"episode1": []byte("member"),
		"episode2": []byte("member"),
		"episode3": []byte("member"),
	},
	"oppo_rancisis": {
		"episode1": []byte("member"),
		"episode2": []byte("member"),
	},
	"coleman_kcaj": {
		"episode3": []byte("member"),
	},
}
// MockTelegraf implements the App interface for tests. It records the
// GlobalFlags and WindowFlags passed to Init so tests can assert on them.
type MockTelegraf struct {
	GlobalFlags
	WindowFlags
}
// NewMockTelegraf returns a zero-valued MockTelegraf.
func NewMockTelegraf() *MockTelegraf {
	return &MockTelegraf{}
}
// Init captures the flag structs; the error channel and filters are ignored.
func (m *MockTelegraf) Init(_ <-chan error, _ Filters, g GlobalFlags, w WindowFlags) {
	m.GlobalFlags = g
	m.WindowFlags = w
}
// Run is a no-op so tests return immediately after flag parsing.
func (*MockTelegraf) Run() error {
	return nil
}
// ListSecretStores returns the IDs of the stores in the secrets fixture
// (order is the map iteration order, i.e. unspecified).
func (*MockTelegraf) ListSecretStores() ([]string, error) {
	ids := make([]string, 0, len(secrets))
	for k := range secrets {
		ids = append(ids, k)
	}
	return ids, nil
}
// GetSecretStore wraps the fixture entry for id in a MockSecretStore, or
// errors when the id is not part of the fixture.
func (*MockTelegraf) GetSecretStore(id string) (telegraf.SecretStore, error) {
	v, found := secrets[id]
	if !found {
		return nil, errors.New("unknown secret store")
	}
	s := &MockSecretStore{Secrets: v}
	return s, nil
}
// MockSecretStore is an in-memory telegraf.SecretStore backed by a plain map.
type MockSecretStore struct {
	Secrets map[string][]byte
}
// Init is a no-op; the store is ready as soon as Secrets is populated.
func (*MockSecretStore) Init() error {
	return nil
}
// SampleConfig returns a placeholder configuration string.
func (*MockSecretStore) SampleConfig() string {
	return "I'm just a dummy"
}
// Get returns the secret stored under key, or an error when it is absent.
func (s *MockSecretStore) Get(key string) ([]byte, error) {
	v, found := s.Secrets[key]
	if !found {
		return nil, errors.New("not found")
	}
	return v, nil
}
// Set stores value under key. Keys prefixed with "darth" are rejected,
// giving the tests a deterministic failure case.
func (s *MockSecretStore) Set(key, value string) error {
	if !strings.HasPrefix(key, "darth") {
		s.Secrets[key] = []byte(value)
		return nil
	}
	return errors.New("don't join the dark side")
}
// List returns the names of all stored secrets (in map iteration order).
func (s *MockSecretStore) List() ([]string, error) {
	keys := make([]string, 0, len(s.Secrets))
	for k := range s.Secrets {
		keys = append(keys, k)
	}
	return keys, nil
}
// GetResolver returns a static (non-dynamic) resolver that looks up key
// via Get each time it is invoked.
func (s *MockSecretStore) GetResolver(key string) (telegraf.ResolveFunc, error) {
	return func() ([]byte, bool, error) {
		v, err := s.Get(key)
		return v, false, err
	}, nil
}
// MockConfig implements the TelegrafConfig interface for tests. It writes
// deprecation output into Buffer and returns a canned deprecation map.
type MockConfig struct {
	Buffer io.Writer
	ExpectedDeprecatedPlugins map[string][]config.PluginDeprecationInfo
}
// NewMockConfig returns a MockConfig writing into buffer.
func NewMockConfig(buffer io.Writer) *MockConfig {
	return &MockConfig{
		Buffer: buffer,
	}
}
// CollectDeprecationInfos ignores the filters and returns the preset map.
func (m *MockConfig) CollectDeprecationInfos(_, _, _, _ []string) map[string][]config.PluginDeprecationInfo {
	return m.ExpectedDeprecatedPlugins
}
// PrintDeprecationList writes one "plugin name: <name>" line per entry.
func (m *MockConfig) PrintDeprecationList(plugins []config.PluginDeprecationInfo) {
	for _, p := range plugins {
		fmt.Fprintf(m.Buffer, "plugin name: %s\n", p.Name)
	}
}
// MockServer implements the Server interface for tests and records whether
// Start was called by setting Address.
type MockServer struct {
	Address string
}
// NewMockServer returns a MockServer with an empty Address.
func NewMockServer() *MockServer {
	return &MockServer{}
}
// Start ignores the requested address and records a fixed one so tests can
// assert that the pprof server would have been started.
func (m *MockServer) Start(_ string) {
	m.Address = "localhost:6060"
}
// ErrChan returns nil; the mock never produces errors.
func (*MockServer) ErrChan() <-chan error {
	return nil
}
// TestUsageFlag verifies the deprecated --usage flag: an unknown plugin name
// yields a combined input/output error, a known input prints its sample
// configuration.
func TestUsageFlag(t *testing.T) {
	tests := []struct {
		PluginName string
		ExpectedError string
		ExpectedOutput string
	}{
		{
			PluginName: "example",
			ExpectedError: "input example not found and output example not found",
		},
		{
			PluginName: "temp",
			ExpectedOutput: `
# Read metrics about temperature
[[inputs.temp]]
## Desired output format (Linux only)
## Available values are
## v1 -- use pre-v1.22.4 sensor naming, e.g. coretemp_core0_input
## v2 -- use v1.22.4+ sensor naming, e.g. coretemp_core_0_input
# metric_format = "v2"
## Add device tag to distinguish devices with the same name (Linux only)
# add_device_tag = false
`,
		},
	}
	for _, test := range tests {
		buf := new(bytes.Buffer)
		args := os.Args[0:1]
		args = append(args, "--usage", test.PluginName)
		err := runApp(args, buf, NewMockServer(), NewMockConfig(buf), NewMockTelegraf())
		if test.ExpectedError != "" {
			require.ErrorContains(t, err, test.ExpectedError)
			continue
		}
		require.NoError(t, err)
		// To run this test on windows and linux, remove windows carriage return
		o := strings.Replace(buf.String(), "\r", "", -1)
		require.Equal(t, test.ExpectedOutput, o)
	}
}
// TestInputListFlag verifies --input-list prints the deprecation notice and
// the registered input plugins. The global registry is swapped for a single
// "test" entry and restored afterwards.
func TestInputListFlag(t *testing.T) {
	buf := new(bytes.Buffer)
	args := os.Args[0:1]
	args = append(args, "--input-list")
	temp := inputs.Inputs
	inputs.Inputs = map[string]inputs.Creator{
		"test": func() telegraf.Input { return nil },
	}
	err := runApp(args, buf, NewMockServer(), NewMockConfig(buf), NewMockTelegraf())
	require.NoError(t, err)
	expectedOutput := `DEPRECATED: use telegraf plugins inputs
Available Input Plugins:
test
`
	require.Equal(t, expectedOutput, buf.String())
	inputs.Inputs = temp
}
// TestOutputListFlag verifies --output-list prints the deprecation notice
// and the registered output plugins, using a temporarily swapped registry.
func TestOutputListFlag(t *testing.T) {
	buf := new(bytes.Buffer)
	args := os.Args[0:1]
	args = append(args, "--output-list")
	temp := outputs.Outputs
	outputs.Outputs = map[string]outputs.Creator{
		"test": func() telegraf.Output { return nil },
	}
	err := runApp(args, buf, NewMockServer(), NewMockConfig(buf), NewMockTelegraf())
	require.NoError(t, err)
	expectedOutput := `DEPRECATED: use telegraf plugins outputs
Available Output Plugins:
test
`
	require.Equal(t, expectedOutput, buf.String())
	outputs.Outputs = temp
}
// TestDeprecationListFlag verifies --deprecation-list prints one section per
// plugin type and lists the plugins provided by the mock config.
func TestDeprecationListFlag(t *testing.T) {
	buf := new(bytes.Buffer)
	args := os.Args[0:1]
	args = append(args, "--deprecation-list")
	mS := NewMockServer()
	mC := NewMockConfig(buf)
	mC.ExpectedDeprecatedPlugins = make(map[string][]config.PluginDeprecationInfo)
	mC.ExpectedDeprecatedPlugins["inputs"] = []config.PluginDeprecationInfo{
		{
			DeprecationInfo: config.DeprecationInfo{
				Name: "test",
			},
		},
	}
	err := runApp(args, buf, mS, mC, NewMockTelegraf())
	require.NoError(t, err)
	expectedOutput := `Deprecated Input Plugins:
plugin name: test
Deprecated Output Plugins:
Deprecated Processor Plugins:
Deprecated Aggregator Plugins:
`
	require.Equal(t, expectedOutput, buf.String())
}
// TestPprofAddressFlag verifies --pprof-addr triggers Server.Start; the mock
// records a fixed address when started.
func TestPprofAddressFlag(t *testing.T) {
	buf := new(bytes.Buffer)
	args := os.Args[0:1]
	address := "localhost:6060"
	args = append(args, "--pprof-addr", address)
	m := NewMockServer()
	err := runApp(args, buf, m, NewMockConfig(buf), NewMockTelegraf())
	require.NoError(t, err)
	require.Equal(t, address, m.Address)
}
// !!! DEPRECATED !!!
// TestPluginDirectoryFlag tests `--plugin-directory`; without the goplugin
// build tag, loading external plugins must fail with a clear error.
func TestPluginDirectoryFlag(t *testing.T) {
	buf := new(bytes.Buffer)
	args := os.Args[0:1]
	args = append(args, "--plugin-directory", ".")
	err := runApp(args, buf, NewMockServer(), NewMockConfig(buf), NewMockTelegraf())
	require.ErrorContains(t, err, "go plugin support is not enabled")
}
// TestCommandConfig exercises the `config` subcommand (and the deprecated
// --sample-config flag) with various section and plugin filters, checking
// that expected section headers/plugins appear and filtered ones do not.
func TestCommandConfig(t *testing.T) {
	tests := []struct {
		name string
		commands []string
		expectedHeaders []string
		removedHeaders []string
		expectedPlugins []string
		removedPlugins []string
	}{
		{
			name: "deprecated flag --sample-config",
			commands: []string{"--sample-config"},
			expectedHeaders: []string{
				outputHeader,
				inputHeader,
				aggregatorHeader,
				processorHeader,
				serviceInputHeader,
			},
		},
		{
			name: "no filters",
			commands: []string{"config"},
			expectedHeaders: []string{
				outputHeader,
				inputHeader,
				aggregatorHeader,
				processorHeader,
				serviceInputHeader,
			},
		},
		{
			name: "filter sections for inputs",
			commands: []string{"config", "--section-filter", "inputs"},
			expectedHeaders: []string{
				inputHeader,
			},
			removedHeaders: []string{
				outputHeader,
				aggregatorHeader,
				processorHeader,
			},
		},
		{
			name: "filter sections for inputs,outputs",
			commands: []string{"config", "--section-filter", "inputs:outputs"},
			expectedHeaders: []string{
				inputHeader,
				outputHeader,
			},
			removedHeaders: []string{
				aggregatorHeader,
				processorHeader,
			},
		},
		{
			name: "filter input plugins",
			commands: []string{"config", "--input-filter", "cpu:file"},
			expectedPlugins: []string{
				"[[inputs.cpu]]",
				"[[inputs.file]]",
			},
			removedPlugins: []string{
				"[[inputs.disk]]",
			},
		},
		{
			name: "filter output plugins",
			commands: []string{"config", "--output-filter", "influxdb:http"},
			expectedPlugins: []string{
				"[[outputs.influxdb]]",
				"[[outputs.http]]",
			},
			removedPlugins: []string{
				"[[outputs.file]]",
			},
		},
		{
			name: "filter processor plugins",
			commands: []string{"config", "--processor-filter", "date:enum"},
			expectedPlugins: []string{
				"[[processors.date]]",
				"[[processors.enum]]",
			},
			removedPlugins: []string{
				"[[processors.parser]]",
			},
		},
		{
			name: "filter aggregator plugins",
			commands: []string{"config", "--aggregator-filter", "basicstats:starlark"},
			expectedPlugins: []string{
				"[[aggregators.basicstats]]",
				"[[aggregators.starlark]]",
			},
			removedPlugins: []string{
				"[[aggregators.minmax]]",
			},
		},
		{
			// old-style invocation: filter flags before the subcommand
			name: "test filters before config",
			commands: []string{"--input-filter", "cpu:file", "config"},
			expectedPlugins: []string{
				"[[inputs.cpu]]",
				"[[inputs.file]]",
			},
			removedPlugins: []string{
				"[[inputs.disk]]",
			},
		},
		{
			// filters in both positions must be merged (see processFilterFlags)
			name: "test filters before and after config",
			commands: []string{"--input-filter", "file", "config", "--input-filter", "cpu"},
			expectedPlugins: []string{
				"[[inputs.cpu]]",
				"[[inputs.file]]",
			},
			removedPlugins: []string{
				"[[inputs.disk]]",
			},
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			buf := new(bytes.Buffer)
			args := os.Args[0:1]
			args = append(args, test.commands...)
			err := runApp(args, buf, NewMockServer(), NewMockConfig(buf), NewMockTelegraf())
			require.NoError(t, err)
			output := buf.String()
			for _, e := range test.expectedHeaders {
				require.Contains(t, output, e, "expected header not found")
			}
			for _, r := range test.removedHeaders {
				require.NotContains(t, output, r, "removed header found")
			}
			for _, e := range test.expectedPlugins {
				require.Contains(t, output, e, "expected plugin not found")
			}
			for _, r := range test.removedPlugins {
				require.NotContains(t, output, r, "removed plugin found")
			}
		})
	}
}
// TestCommandVersion verifies the `version` subcommand output for all
// combinations of version/branch/commit being set or unset.
func TestCommandVersion(t *testing.T) {
	tests := []struct {
		Version string
		Branch string
		Commit string
		ExpectedOutput string
	}{
		{
			Version: "v2.0.0",
			ExpectedOutput: "Telegraf v2.0.0\n",
		},
		{
			ExpectedOutput: "Telegraf unknown\n",
		},
		{
			Version: "v2.0.0",
			Branch: "master",
			ExpectedOutput: "Telegraf v2.0.0 (git: master@unknown)\n",
		},
		{
			Version: "v2.0.0",
			Branch: "master",
			Commit: "123",
			ExpectedOutput: "Telegraf v2.0.0 (git: master@123)\n",
		},
		{
			Version: "v2.0.0",
			Commit: "123",
			ExpectedOutput: "Telegraf v2.0.0 (git: unknown@123)\n",
		},
	}
	for _, test := range tests {
		buf := new(bytes.Buffer)
		args := os.Args[0:1]
		args = append(args, "version")
		// Mutates package-level build info consumed by FormatFullVersion.
		internal.Version = test.Version
		internal.Branch = test.Branch
		internal.Commit = test.Commit
		err := runApp(args, buf, NewMockServer(), NewMockConfig(buf), NewMockTelegraf())
		require.NoError(t, err)
		require.Equal(t, test.ExpectedOutput, buf.String())
	}
}
// Users should use the version subcommand; this covers the deprecated
// --version flag with the same table as TestCommandVersion.
func TestFlagVersion(t *testing.T) {
	tests := []struct {
		Version string
		Branch string
		Commit string
		ExpectedOutput string
	}{
		{
			Version: "v2.0.0",
			ExpectedOutput: "Telegraf v2.0.0\n",
		},
		{
			ExpectedOutput: "Telegraf unknown\n",
		},
		{
			Version: "v2.0.0",
			Branch: "master",
			ExpectedOutput: "Telegraf v2.0.0 (git: master@unknown)\n",
		},
		{
			Version: "v2.0.0",
			Branch: "master",
			Commit: "123",
			ExpectedOutput: "Telegraf v2.0.0 (git: master@123)\n",
		},
		{
			Version: "v2.0.0",
			Commit: "123",
			ExpectedOutput: "Telegraf v2.0.0 (git: unknown@123)\n",
		},
	}
	for _, test := range tests {
		buf := new(bytes.Buffer)
		args := os.Args[0:1]
		args = append(args, "--version")
		// Mutates package-level build info consumed by FormatFullVersion.
		internal.Version = test.Version
		internal.Branch = test.Branch
		internal.Commit = test.Commit
		err := runApp(args, buf, NewMockServer(), NewMockConfig(buf), NewMockTelegraf())
		require.NoError(t, err)
		require.Equal(t, test.ExpectedOutput, buf.String())
	}
}
// TestGlobablBoolFlags verifies the global boolean flags are forwarded into
// GlobalFlags. NOTE(review): the function name has a typo ("Globabl");
// renaming would change the test's public identifier, so it is kept.
func TestGlobablBoolFlags(t *testing.T) {
	commands := []string{
		"--debug",
		"--test",
		"--quiet",
		"--once",
	}
	buf := new(bytes.Buffer)
	args := os.Args[0:1]
	args = append(args, commands...)
	m := NewMockTelegraf()
	err := runApp(args, buf, NewMockServer(), NewMockConfig(buf), m)
	require.NoError(t, err)
	require.True(t, m.debug)
	require.True(t, m.test)
	require.True(t, m.once)
	require.True(t, m.quiet)
}
// TestFlagsAreSet verifies string, slice, int and bool flags all land in the
// corresponding GlobalFlags fields passed to App.Init.
func TestFlagsAreSet(t *testing.T) {
	expectedInt := 1
	expectedString := "test"
	commands := []string{
		"--config", expectedString,
		"--config-directory", expectedString,
		"--debug",
		"--test",
		"--quiet",
		"--once",
		"--test-wait", strconv.Itoa(expectedInt),
		"--watch-config", expectedString,
		"--pidfile", expectedString,
	}
	buf := new(bytes.Buffer)
	args := os.Args[0:1]
	args = append(args, commands...)
	m := NewMockTelegraf()
	err := runApp(args, buf, NewMockServer(), NewMockConfig(buf), m)
	require.NoError(t, err)
	require.Equal(t, []string{expectedString}, m.config)
	require.Equal(t, []string{expectedString}, m.configDir)
	require.True(t, m.debug)
	require.True(t, m.test)
	require.True(t, m.once)
	require.True(t, m.quiet)
	require.Equal(t, expectedInt, m.testWait)
	require.Equal(t, expectedString, m.watchConfig)
	require.Equal(t, expectedString, m.pidFile)
}

View file

@ -0,0 +1,38 @@
//go:build windows
package main
import (
"bytes"
"os"
"testing"
"github.com/stretchr/testify/require"
)
// TestWindowsFlagsAreSet verifies the Windows-service flags are forwarded
// into the WindowFlags struct passed to App.Init (windows-only build).
func TestWindowsFlagsAreSet(t *testing.T) {
	expectedString := "test"
	commands := []string{
		"--service", expectedString,
		"--service-name", expectedString,
		"--service-display-name", expectedString,
		"--service-restart-delay", expectedString,
		"--service-auto-restart",
		"--console",
	}
	buf := new(bytes.Buffer)
	args := os.Args[0:1]
	args = append(args, commands...)
	m := NewMockTelegraf()
	err := runApp(args, buf, NewMockServer(), NewMockConfig(buf), m)
	require.NoError(t, err)
	require.Equal(t, expectedString, m.service)
	require.Equal(t, expectedString, m.serviceName)
	require.Equal(t, expectedString, m.serviceDisplayName)
	require.Equal(t, expectedString, m.serviceRestartDelay)
	require.True(t, m.serviceAutoRestart)
	require.True(t, m.console)
}

52
cmd/telegraf/pprof.go Normal file
View file

@ -0,0 +1,52 @@
package main
import (
"log"
"net/http"
_ "net/http/pprof" //nolint:gosec // Import for pprof, only enabled via CLI flag
"strings"
"time"
)
// Server abstracts the pprof HTTP server so tests can substitute a mock.
type Server interface {
	// Start begins serving on the given "host:port" address.
	Start(string)
	// ErrChan exposes asynchronous server errors, if any.
	ErrChan() <-chan error
}
// PprofServer serves Go's net/http/pprof endpoints and reports serve
// failures on an error channel.
type PprofServer struct {
	err chan error // unbuffered; receives the ListenAndServe error, then is closed
}
// NewPprofServer returns a PprofServer that has not been started yet.
func NewPprofServer() *PprofServer {
	return &PprofServer{
		err: make(chan error),
	}
}
// Start launches the pprof HTTP server on address in a background goroutine.
// An empty host part (e.g. ":6060") is rewritten to "localhost" for the log
// message only; the server still listens on the address as given.
func (p *PprofServer) Start(address string) {
	go func() {
		pprofHostPort := address
		parts := strings.Split(pprofHostPort, ":")
		if len(parts) == 2 && parts[0] == "" {
			pprofHostPort = "localhost:" + parts[1]
		}
		pprofHostPort = "http://" + pprofHostPort + "/debug/pprof"
		log.Printf("I! Starting pprof HTTP server at: %s", pprofHostPort)
		// Handlers are registered on http.DefaultServeMux by the
		// side-effect import of net/http/pprof.
		server := &http.Server{
			Addr: address,
			ReadTimeout: 10 * time.Second,
			WriteTimeout: 10 * time.Second,
		}
		if err := server.ListenAndServe(); err != nil {
			// NOTE(review): p.err is unbuffered, so this send blocks until a
			// consumer reads ErrChan() — confirm all callers drain it.
			p.err <- err
		}
		close(p.err)
	}()
}
// ErrChan returns the channel on which serve errors are reported; it is
// closed when the server goroutine exits.
func (p *PprofServer) ErrChan() <-chan error {
	return p.err
}

408
cmd/telegraf/printer.go Normal file
View file

@ -0,0 +1,408 @@
package main
import (
_ "embed"
"fmt"
"io"
"sort"
"strings"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal/choice"
"github.com/influxdata/telegraf/plugins/aggregators"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf/plugins/processors"
"github.com/influxdata/telegraf/plugins/secretstores"
)
var (
	// Default sections
	sectionDefaults = []string{"global_tags", "agent", "secretstores", "outputs", "processors", "aggregators", "inputs"}
	// Default input plugins
	inputDefaults = []string{"cpu", "mem", "swap", "system", "kernel", "processes", "disk", "diskio"}
	// Default output plugins (none; all outputs are printed commented-out)
	outputDefaults = make([]string, 0)
)
// header is the banner emitted at the top of every generated sample config.
var header = `# Telegraf Configuration
#
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
#
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
#
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
#
# Environment variables can be used anywhere in this config file, simply surround
# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"),
# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR})
`
// globalTagsConfig is the sample [global_tags] section.
var globalTagsConfig = `
# Global tags can be specified here in key="value" format.
[global_tags]
# dc = "us-east-1" # will tag all metrics with dc=us-east-1
# rack = "1a"
## Environment variables can be used as tags, and throughout the config file
# user = "$USER"
`
// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the agentConfig data.
//
//go:embed agent.conf
var agentConfig string
// Section banner printed before the secret-store plugin samples.
var secretstoreHeader = `
###############################################################################
# SECRETSTORE PLUGINS #
###############################################################################
`
// Section banner printed before the output plugin samples.
var outputHeader = `
###############################################################################
# OUTPUT PLUGINS #
###############################################################################
`
// Section banner printed before the processor plugin samples.
var processorHeader = `
###############################################################################
# PROCESSOR PLUGINS #
###############################################################################
`
// Section banner printed before the aggregator plugin samples.
var aggregatorHeader = `
###############################################################################
# AGGREGATOR PLUGINS #
###############################################################################
`
// Section banner printed before the (non-service) input plugin samples.
var inputHeader = `
###############################################################################
# INPUT PLUGINS #
###############################################################################
`
// Section banner printed before the service input plugin samples.
var serviceInputHeader = `
###############################################################################
# SERVICE INPUT PLUGINS #
###############################################################################
`
// printSampleConfig prints the sample config
func printSampleConfig(outputBuffer io.Writer, filters Filters) {
sectionFilters := filters.section
inputFilters := filters.input
outputFilters := filters.output
aggregatorFilters := filters.aggregator
processorFilters := filters.processor
secretstoreFilters := filters.secretstore
// print headers
outputBuffer.Write([]byte(header))
if len(sectionFilters) == 0 {
sectionFilters = sectionDefaults
}
printFilteredGlobalSections(sectionFilters, outputBuffer)
// print secretstore plugins
if choice.Contains("secretstores", sectionFilters) {
if len(secretstoreFilters) != 0 {
if len(secretstoreFilters) >= 3 && secretstoreFilters[1] != "none" {
fmt.Print(secretstoreHeader)
}
printFilteredSecretstores(secretstoreFilters, false, outputBuffer)
} else {
fmt.Print(secretstoreHeader)
snames := make([]string, 0, len(secretstores.SecretStores))
for sname := range secretstores.SecretStores {
snames = append(snames, sname)
}
sort.Strings(snames)
printFilteredSecretstores(snames, true, outputBuffer)
}
}
// print output plugins
if choice.Contains("outputs", sectionFilters) {
if len(outputFilters) != 0 {
if len(outputFilters) >= 3 && outputFilters[1] != "none" {
outputBuffer.Write([]byte(outputHeader))
}
printFilteredOutputs(outputFilters, false, outputBuffer)
} else {
outputBuffer.Write([]byte(outputHeader))
printFilteredOutputs(outputDefaults, false, outputBuffer)
// Print non-default outputs, commented
var pnames []string
for pname := range outputs.Outputs {
if !choice.Contains(pname, outputDefaults) {
pnames = append(pnames, pname)
}
}
printFilteredOutputs(pnames, true, outputBuffer)
}
}
// print processor plugins
if choice.Contains("processors", sectionFilters) {
if len(processorFilters) != 0 {
if len(processorFilters) >= 3 && processorFilters[1] != "none" {
outputBuffer.Write([]byte(processorHeader))
}
printFilteredProcessors(processorFilters, false, outputBuffer)
} else {
outputBuffer.Write([]byte(processorHeader))
pnames := make([]string, 0, len(processors.Processors))
for pname := range processors.Processors {
pnames = append(pnames, pname)
}
printFilteredProcessors(pnames, true, outputBuffer)
}
}
// print aggregator plugins
if choice.Contains("aggregators", sectionFilters) {
if len(aggregatorFilters) != 0 {
if len(aggregatorFilters) >= 3 && aggregatorFilters[1] != "none" {
outputBuffer.Write([]byte(aggregatorHeader))
}
printFilteredAggregators(aggregatorFilters, false, outputBuffer)
} else {
outputBuffer.Write([]byte(aggregatorHeader))
pnames := make([]string, 0, len(aggregators.Aggregators))
for pname := range aggregators.Aggregators {
pnames = append(pnames, pname)
}
printFilteredAggregators(pnames, true, outputBuffer)
}
}
// print input plugins
if choice.Contains("inputs", sectionFilters) {
if len(inputFilters) != 0 {
if len(inputFilters) >= 3 && inputFilters[1] != "none" {
outputBuffer.Write([]byte(inputHeader))
}
printFilteredInputs(inputFilters, false, outputBuffer)
} else {
outputBuffer.Write([]byte(inputHeader))
printFilteredInputs(inputDefaults, false, outputBuffer)
// Print non-default inputs, commented
var pnames []string
for pname := range inputs.Inputs {
if !choice.Contains(pname, inputDefaults) {
pnames = append(pnames, pname)
}
}
printFilteredInputs(pnames, true, outputBuffer)
}
}
}
// printFilteredProcessors prints the sample config of every registered
// processor matching processorFilters, sorted by name, optionally commented.
func printFilteredProcessors(processorFilters []string, commented bool, outputBuffer io.Writer) {
	// Filter processors
	var pnames []string
	for pname := range processors.Processors {
		if choice.Contains(pname, processorFilters) {
			pnames = append(pnames, pname)
		}
	}
	sort.Strings(pnames)
	// Print processors
	for _, pname := range pnames {
		creator := processors.Processors[pname]
		output := creator()
		printConfig(pname, output, "processors", commented, processors.Deprecations[pname], outputBuffer)
	}
}
// printFilteredAggregators prints the sample config of every registered
// aggregator matching aggregatorFilters, sorted by name, optionally commented.
func printFilteredAggregators(aggregatorFilters []string, commented bool, outputBuffer io.Writer) {
	// Filter aggregators
	var anames []string
	for aname := range aggregators.Aggregators {
		if choice.Contains(aname, aggregatorFilters) {
			anames = append(anames, aname)
		}
	}
	sort.Strings(anames)
	// Print aggregators
	for _, aname := range anames {
		creator := aggregators.Aggregators[aname]
		output := creator()
		printConfig(aname, output, "aggregators", commented, aggregators.Deprecations[aname], outputBuffer)
	}
}
// printFilteredInputs prints the sample config of every registered input
// matching inputFilters, sorted by name. Regular inputs are printed first;
// service inputs are collected and printed afterwards under their own header.
func printFilteredInputs(inputFilters []string, commented bool, outputBuffer io.Writer) {
	// Filter inputs
	var pnames []string
	for pname := range inputs.Inputs {
		if choice.Contains(pname, inputFilters) {
			pnames = append(pnames, pname)
		}
	}
	sort.Strings(pnames)
	// cache service inputs to print them at the end
	servInputs := make(map[string]telegraf.ServiceInput)
	// for alphabetical looping:
	servInputNames := make([]string, 0, len(pnames))
	// Print Inputs
	for _, pname := range pnames {
		// Skip inputs that are registered twice for backward compatibility
		switch pname {
		case "cisco_telemetry_gnmi", "http_listener", "io", "KNXListener":
			continue
		}
		creator := inputs.Inputs[pname]
		input := creator()
		// Service inputs are deferred so they appear under their own header.
		if p, ok := input.(telegraf.ServiceInput); ok {
			servInputs[pname] = p
			servInputNames = append(servInputNames, pname)
			continue
		}
		printConfig(pname, input, "inputs", commented, inputs.Deprecations[pname], outputBuffer)
	}
	// Print Service Inputs
	if len(servInputs) == 0 {
		return
	}
	sort.Strings(servInputNames)
	outputBuffer.Write([]byte(serviceInputHeader))
	for _, name := range servInputNames {
		printConfig(name, servInputs[name], "inputs", commented, inputs.Deprecations[name], outputBuffer)
	}
}
// printFilteredOutputs renders the sample configuration of every output
// plugin selected by the given filters into outputBuffer. The influxdb_v2
// output, when selected, is always printed first; all others follow in
// alphabetical order. When commented is true, each config line is prefixed
// so it is inactive.
func printFilteredOutputs(outputFilters []string, commented bool, outputBuffer io.Writer) {
	// Collect the selected output names; influxdb_v2 is held back so it can
	// be placed at the very beginning of the list.
	var hasInfluxDBv2 bool
	selected := make([]string, 0, len(outputs.Outputs))
	for name := range outputs.Outputs {
		if !choice.Contains(name, outputFilters) {
			continue
		}
		if name == "influxdb_v2" {
			hasInfluxDBv2 = true
			continue
		}
		selected = append(selected, name)
	}
	sort.Strings(selected)
	if hasInfluxDBv2 {
		selected = append([]string{"influxdb_v2"}, selected...)
	}

	// Emit the sample configuration for each selected output.
	for _, name := range selected {
		plugin := outputs.Outputs[name]()
		printConfig(name, plugin, "outputs", commented, outputs.Deprecations[name], outputBuffer)
	}
}
// printFilteredSecretstores renders the sample configuration of every
// secret-store plugin selected by the given filters into outputBuffer, in
// alphabetical order. When commented is true, each config line is prefixed
// so it is inactive.
func printFilteredSecretstores(secretstoreFilters []string, commented bool, outputBuffer io.Writer) {
	// Collect the names of all registered secret stores matching the filter.
	selected := make([]string, 0, len(secretstores.SecretStores))
	for name := range secretstores.SecretStores {
		if choice.Contains(name, secretstoreFilters) {
			selected = append(selected, name)
		}
	}
	sort.Strings(selected)

	// Emit the sample configuration for each selected store; the store ID
	// passed to the creator is irrelevant for printing.
	for _, name := range selected {
		store := secretstores.SecretStores[name]("dummy")
		printConfig(name, store, "secretstores", commented, secretstores.Deprecations[name], outputBuffer)
	}
}
func printFilteredGlobalSections(sectionFilters []string, outputBuffer io.Writer) {
if choice.Contains("global_tags", sectionFilters) {
outputBuffer.Write([]byte(globalTagsConfig))
}
if choice.Contains("agent", sectionFilters) {
outputBuffer.Write([]byte(agentConfig))
}
}
// printConfig writes the sample configuration of a single plugin to
// outputBuffer: an optional deprecation banner followed by the plugin's
// sample config (or a "no configuration" placeholder). op is the plugin
// category used in the section header (e.g. "inputs"). When commented is
// true, every line is prefixed with "# " so it is inactive.
func printConfig(name string, p telegraf.PluginDescriber, op string, commented bool, di telegraf.DeprecationInfo, outputBuffer io.Writer) {
	var prefix string
	if commented {
		prefix = "# "
	}

	// Announce the deprecation (and planned removal, if known) ahead of the
	// plugin's sample configuration.
	if di.Since != "" {
		removalNote := ""
		if di.RemovalIn != "" {
			removalNote = " and will be removed in " + di.RemovalIn
		}
		fmt.Fprintf(outputBuffer, "\n%s## DEPRECATED: The %q plugin is deprecated in version %s%s, %s.",
			prefix, name, di.Since, removalNote, di.Notice)
	}

	sample := p.SampleConfig()
	if sample == "" {
		// No sample available; emit a placeholder section instead.
		fmt.Fprintf(outputBuffer, "\n#[[%s.%s]]", op, name)
		fmt.Fprintf(outputBuffer, "\n%s # no configuration\n\n", prefix)
		return
	}

	outputBuffer.Write([]byte("\n"))
	lines := strings.Split(sample, "\n")
	for i, line := range lines {
		// The final split element is skipped and replaced by a blank
		// separator line (assumes the sample ends with a newline).
		if i == len(lines)-1 {
			outputBuffer.Write([]byte("\n"))
			continue
		}
		outputBuffer.Write([]byte(strings.TrimRight(prefix+line, " ") + "\n"))
	}
}
// PrintInputConfig prints the config usage of a single input plugin to
// outputBuffer, or returns an error if no input with that name is registered.
func PrintInputConfig(name string, outputBuffer io.Writer) error {
	if creator, found := inputs.Inputs[name]; found {
		printConfig(name, creator(), "inputs", false, inputs.Deprecations[name], outputBuffer)
		return nil
	}
	return fmt.Errorf("input %s not found", name)
}
// PrintOutputConfig prints the config usage of a single output plugin to
// outputBuffer, or returns an error if no output with that name is registered.
func PrintOutputConfig(name string, outputBuffer io.Writer) error {
	if creator, found := outputs.Outputs[name]; found {
		printConfig(name, creator(), "outputs", false, outputs.Deprecations[name], outputBuffer)
		return nil
	}
	return fmt.Errorf("output %s not found", name)
}

493
cmd/telegraf/telegraf.go Normal file
View file

@ -0,0 +1,493 @@
package main
import (
"context"
"errors"
"fmt"
"log"
"net/http"
"net/url"
"os"
"os/signal"
"strings"
"syscall"
"time"
"github.com/coreos/go-systemd/v22/daemon"
"github.com/fatih/color"
"github.com/influxdata/tail/watch"
"gopkg.in/tomb.v1"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/agent"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/logger"
"github.com/influxdata/telegraf/plugins/aggregators"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf/plugins/parsers"
"github.com/influxdata/telegraf/plugins/processors"
"github.com/influxdata/telegraf/plugins/secretstores"
)
// stop is created by the platform-specific Run/Execute implementations;
// receiving on it makes reloadLoop cancel the agent context and shut down.
var stop chan struct{}
// GlobalFlags holds the command-line options shared by all platforms and
// sub-commands.
type GlobalFlags struct {
	config                  []string      // config file paths or URLs
	configDir               []string      // directories to scan for config files
	testWait                int           // seconds to wait in test/once mode
	configURLRetryAttempts  int           // retry attempts when fetching remote configs
	configURLWatchInterval  time.Duration // polling interval for watching remote (URL) configs
	watchConfig             string        // local config watch mode: "poll" or inotify-based
	watchInterval           time.Duration // polling interval for local config watching (poll mode)
	pidFile                 string        // file to write the process ID to
	plugindDir              string        // external plugin directory; when empty, at least one input must be configured
	password                string        // global password stored into config.Password
	oldEnvBehavior          bool          // use legacy environment-variable replacement (config.OldEnvVarReplacement)
	printPluginConfigSource bool          // print the source of each plugin config (config.PrintPluginConfigSource)
	test                    bool          // run in test mode (outputs are not used)
	debug                   bool          // enable debug logging
	once                    bool          // run a single gather/flush cycle and exit
	quiet                   bool          // suppress non-error log output
	unprotected             bool          // disable secret protection (locked memory)
}
// WindowFlags holds the Windows-specific service flags; they are unused on
// other platforms.
type WindowFlags struct {
	service            string // deprecated service command: install/uninstall/start/stop/status
	serviceName        string // name of the Windows service
	serviceDisplayName string // display name used when installing the service
	serviceRestartDelay string // restart delay (parsed as time.Duration) when auto-restart is enabled
	serviceAutoRestart bool   // configure automatic service restart on failure
	console            bool   // force console mode even when started by the service manager
}
// App is the interface implemented by the Telegraf command-line application.
// It abstracts initialization, the main run loop, and secret-store access
// for the CLI sub-commands.
type App interface {
	// Init stores filters and flags and applies global settings.
	Init(<-chan error, Filters, GlobalFlags, WindowFlags)
	// Run executes the application until it terminates.
	Run() error

	// Secret store commands
	ListSecretStores() ([]string, error)
	GetSecretStore(string) (telegraf.SecretStore, error)
}
// Telegraf is the App implementation holding the parsed flags, the plugin
// filters and the loaded configuration.
type Telegraf struct {
	pprofErr <-chan error // receives errors from the pprof server, if one is running

	inputFilters       []string // restrict loading to these input plugins
	outputFilters      []string // restrict loading to these output plugins
	configFiles        []string // resolved list of config files (see getConfigFiles)
	secretstoreFilters []string // restrict loading to these secret-store plugins

	cfg *config.Config // currently loaded configuration

	GlobalFlags
	WindowFlags
}
// Init stores the plugin filters and command-line flags on the application
// and applies the global settings (secret protection, global password,
// environment-variable replacement behavior) before any configuration is
// loaded.
func (t *Telegraf) Init(pprofErr <-chan error, f Filters, g GlobalFlags, w WindowFlags) {
	t.pprofErr = pprofErr
	t.inputFilters = f.input
	t.outputFilters = f.output
	t.secretstoreFilters = f.secretstore
	t.GlobalFlags = g
	t.WindowFlags = w

	// Disable secret protection before performing any other operation
	if g.unprotected {
		log.Println("W! Running without secret protection!")
		config.DisableSecretProtection()
	}

	// Set global password
	if g.password != "" {
		config.Password = config.NewSecret([]byte(g.password))
	}

	// Set environment replacement behavior
	config.OldEnvVarReplacement = g.oldEnvBehavior

	config.PrintPluginConfigSource = g.printPluginConfigSource
}
// ListSecretStores loads the configuration and returns the IDs of all
// secret stores defined in it. The returned list is unordered.
func (t *Telegraf) ListSecretStores() ([]string, error) {
	cfg, err := t.loadConfiguration()
	if err != nil {
		return nil, err
	}

	ids := make([]string, 0, len(cfg.SecretStores))
	for id := range cfg.SecretStores {
		ids = append(ids, id)
	}
	return ids, nil
}
// GetSecretStore loads the configuration and returns the secret store with
// the given ID, or an error if no such store is configured. Log output is
// suppressed while resolving the store.
func (t *Telegraf) GetSecretStore(id string) (telegraf.SecretStore, error) {
	t.quiet = true
	cfg, err := t.loadConfiguration()
	if err != nil {
		return nil, err
	}

	if store, found := cfg.SecretStores[id]; found {
		return store, nil
	}
	return nil, errors.New("unknown secret store")
}
// reloadLoop runs the agent and restarts it whenever a reload is requested
// (SIGHUP, a config-watcher event, or a remote-config change). It returns
// when the agent stops for any other reason or fails with an error.
func (t *Telegraf) reloadLoop() error {
	reloadConfig := false
	// The single-element channel carries the "run another iteration" flag:
	// each iteration resets it to false and the signal handler flips it back
	// to true when a reload is requested.
	reload := make(chan bool, 1)
	reload <- true
	for <-reload {
		reload <- false
		ctx, cancel := context.WithCancel(context.Background())
		signals := make(chan os.Signal, 1)
		signal.Notify(signals, os.Interrupt, syscall.SIGHUP,
			syscall.SIGTERM, syscall.SIGINT)
		// Start filesystem watchers for local config files and directories.
		if t.watchConfig != "" {
			for _, fConfig := range t.configFiles {
				// Remote (URL) configs cannot be watched via the filesystem.
				if isURL(fConfig) {
					continue
				}
				if _, err := os.Stat(fConfig); err != nil {
					log.Printf("W! Cannot watch config %s: %s", fConfig, err)
				} else {
					go t.watchLocalConfig(ctx, signals, fConfig)
				}
			}
			for _, fConfigDirectory := range t.configDir {
				if _, err := os.Stat(fConfigDirectory); err != nil {
					log.Printf("W! Cannot watch config directory %s: %s", fConfigDirectory, err)
				} else {
					go t.watchLocalConfig(ctx, signals, fConfigDirectory)
				}
			}
		}
		// Start the HTTP poller for remote (URL) configs if requested.
		if t.configURLWatchInterval > 0 {
			remoteConfigs := make([]string, 0)
			for _, fConfig := range t.configFiles {
				if isURL(fConfig) {
					remoteConfigs = append(remoteConfigs, fConfig)
				}
			}
			if len(remoteConfigs) > 0 {
				go t.watchRemoteConfigs(ctx, signals, t.configURLWatchInterval, remoteConfigs)
			}
		}
		// Wait for a shutdown/reload trigger and cancel the agent context.
		go func() {
			select {
			case sig := <-signals:
				if sig == syscall.SIGHUP {
					log.Println("I! Reloading Telegraf config")
					// May need to update the list of known config files
					// if a delete or create occurred. That way on the reload
					// we ensure we watch the correct files.
					if err := t.getConfigFiles(); err != nil {
						log.Println("E! Error loading config files: ", err)
					}
					<-reload
					reload <- true
				}
				cancel()
			case err := <-t.pprofErr:
				log.Printf("E! pprof server failed: %v", err)
				cancel()
			case <-stop:
				cancel()
			}
		}()
		err := t.runAgent(ctx, reloadConfig)
		if err != nil && !errors.Is(err, context.Canceled) {
			return fmt.Errorf("[telegraf] Error running agent: %w", err)
		}
		// After the first iteration the config must be re-read on reload.
		reloadConfig = true
	}

	return nil
}
// watchLocalConfig watches a single local config file or directory and
// translates the first change event (modify, delete, truncate, create) into
// a SIGHUP on the signals channel, which makes reloadLoop reload the config.
// It returns without signaling when the context is cancelled or the watcher
// dies.
func (t *Telegraf) watchLocalConfig(ctx context.Context, signals chan os.Signal, fConfig string) {
	var mytomb tomb.Tomb
	// Choose the watcher backend: polling (optionally with a custom
	// interval) or inotify-based.
	var watcher watch.FileWatcher
	if t.watchConfig == "poll" {
		if t.watchInterval > 0 {
			watcher = watch.NewPollingFileWatcherWithDuration(fConfig, t.watchInterval)
		} else {
			watcher = watch.NewPollingFileWatcher(fConfig)
		}
	} else {
		watcher = watch.NewInotifyFileWatcher(fConfig)
	}
	changes, err := watcher.ChangeEvents(&mytomb, 0)
	if err != nil {
		log.Printf("E! Error watching config file/directory %q: %s\n", fConfig, err)
		return
	}
	log.Printf("I! Config watcher started for %s\n", fConfig)
	// Wait for exactly one event, then fall through to signal a reload.
	select {
	case <-ctx.Done():
		mytomb.Done()
		return
	case <-changes.Modified:
		log.Printf("I! Config file/directory %q modified\n", fConfig)
	case <-changes.Deleted:
		// deleted can mean moved. wait a bit a check existence
		<-time.After(time.Second)
		if _, err := os.Stat(fConfig); err == nil {
			log.Printf("I! Config file/directory %q overwritten\n", fConfig)
		} else {
			log.Printf("W! Config file/directory %q deleted\n", fConfig)
		}
	case <-changes.Truncated:
		log.Printf("I! Config file/directory %q truncated\n", fConfig)
	case <-changes.Created:
		log.Printf("I! Config directory %q has new file(s)\n", fConfig)
	case <-mytomb.Dying():
		log.Printf("I! Config watcher %q ended\n", fConfig)
		return
	}
	mytomb.Done()
	// Trigger the reload in reloadLoop.
	signals <- syscall.SIGHUP
}
// watchRemoteConfigs periodically polls the given remote config URLs with
// HTTP HEAD requests and compares their Last-Modified headers. When a change
// is detected, a SIGHUP is sent on the signals channel to trigger a config
// reload and the watcher returns. URLs whose responses lack a Last-Modified
// header are dropped from watching (changes cannot be detected for them).
// The function returns when the context is cancelled or a signal is received.
func (*Telegraf) watchRemoteConfigs(ctx context.Context, signals chan os.Signal, interval time.Duration, remoteConfigs []string) {
	configs := strings.Join(remoteConfigs, ", ")
	log.Printf("I! Remote config watcher started for: %s\n", configs)

	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	// Track the last seen Last-Modified value per URL. Removing a URL from
	// this map stops watching it.
	lastModified := make(map[string]string, len(remoteConfigs))
	for _, configURL := range remoteConfigs {
		lastModified[configURL] = ""
	}

	for {
		select {
		case <-ctx.Done():
			return
		case <-signals:
			return
		case <-ticker.C:
			for _, configURL := range remoteConfigs {
				if _, watched := lastModified[configURL]; !watched {
					// Watching this URL was stopped in an earlier iteration.
					continue
				}

				req, err := http.NewRequest("HEAD", configURL, nil)
				if err != nil {
					log.Printf("W! Creating request for fetching config from %q failed: %v\n", configURL, err)
					continue
				}
				if v, exists := os.LookupEnv("INFLUX_TOKEN"); exists {
					req.Header.Add("Authorization", "Token "+v)
				}
				req.Header.Set("User-Agent", internal.ProductToken())

				resp, err := http.DefaultClient.Do(req)
				if err != nil {
					log.Printf("W! Fetching config from %q failed: %v\n", configURL, err)
					continue
				}
				resp.Body.Close()

				modified := resp.Header.Get("Last-Modified")
				if modified == "" {
					// Without a Last-Modified header changes cannot be
					// detected, so actually stop watching this URL instead
					// of retrying (and logging) forever.
					log.Printf("E! Last-Modified header not found, stopping the watcher for %s\n", configURL)
					delete(lastModified, configURL)
					continue
				}

				if lastModified[configURL] == "" {
					// First successful poll; remember the baseline value.
					lastModified[configURL] = modified
				} else if lastModified[configURL] != modified {
					log.Printf("I! Remote config modified: %s\n", configURL)
					signals <- syscall.SIGHUP
					return
				}
			}
		}
	}
}
// loadConfiguration creates a fresh config, applies the command-line
// filters/flags, resolves the list of config files and loads them all.
// The (possibly partially filled) config is returned even on error.
func (t *Telegraf) loadConfiguration() (*config.Config, error) {
	cfg := config.NewConfig()
	cfg.Agent.Quiet = t.quiet
	cfg.Agent.ConfigURLRetryAttempts = t.configURLRetryAttempts
	cfg.OutputFilters = t.outputFilters
	cfg.InputFilters = t.inputFilters
	cfg.SecretStoreFilters = t.secretstoreFilters

	if err := t.getConfigFiles(); err != nil {
		return cfg, err
	}
	if err := cfg.LoadAll(t.configFiles...); err != nil {
		return cfg, err
	}
	return cfg, nil
}
// getConfigFiles resolves the effective list of configuration files from
// the --config flags and the contents of all --config-directory directories,
// falling back to the default config paths when nothing was specified. The
// result is stored in t.configFiles.
func (t *Telegraf) getConfigFiles() error {
	files := append([]string{}, t.config...)
	for _, dir := range t.configDir {
		found, err := config.WalkDirectory(dir)
		if err != nil {
			return err
		}
		files = append(files, found...)
	}

	// Fall back to the default config locations when nothing was given.
	if len(files) == 0 {
		defaults, err := config.GetDefaultConfigPath()
		if err != nil {
			return fmt.Errorf("unable to load default config paths: %w", err)
		}
		files = append(files, defaults...)
	}

	t.configFiles = files
	return nil
}
// runAgent validates the configuration, sets up logging and runs the agent
// until ctx is cancelled. When reloadConfig is true the configuration is
// re-read from the configured files/URLs first. Depending on the flags the
// agent runs a single cycle (--once), in test mode (--test/--test-wait), or
// continuously.
func (t *Telegraf) runAgent(ctx context.Context, reloadConfig bool) error {
	c := t.cfg
	var err error
	if reloadConfig {
		if c, err = t.loadConfiguration(); err != nil {
			return err
		}
	}

	// Basic sanity checks on the loaded configuration.
	if !t.test && t.testWait == 0 && len(c.Outputs) == 0 {
		return errors.New("no outputs found, probably invalid config file provided")
	}
	if t.plugindDir == "" && len(c.Inputs) == 0 {
		return errors.New("no inputs found, probably invalid config file provided")
	}
	if int64(c.Agent.Interval) <= 0 {
		return fmt.Errorf("agent interval must be positive, found %v", c.Agent.Interval)
	}
	if int64(c.Agent.FlushInterval) <= 0 {
		// Report the offending flush interval, not the collection interval.
		return fmt.Errorf("agent flush_interval must be positive; found %v", c.Agent.FlushInterval)
	}

	// Setup logging as configured.
	logConfig := &logger.Config{
		Debug:                   c.Agent.Debug || t.debug,
		Quiet:                   c.Agent.Quiet || t.quiet,
		LogTarget:               c.Agent.LogTarget,
		LogFormat:               c.Agent.LogFormat,
		Logfile:                 c.Agent.Logfile,
		StructuredLogMessageKey: c.Agent.StructuredLogMessageKey,
		RotationInterval:        time.Duration(c.Agent.LogfileRotationInterval),
		RotationMaxSize:         int64(c.Agent.LogfileRotationMaxSize),
		RotationMaxArchives:     c.Agent.LogfileRotationMaxArchives,
		LogWithTimezone:         c.Agent.LogWithTimezone,
	}
	if err := logger.SetupLogging(logConfig); err != nil {
		return err
	}

	// Report startup information and the loaded plugin set.
	log.Printf("I! Starting Telegraf %s%s brought to you by InfluxData the makers of InfluxDB", internal.Version, internal.Customized)
	log.Printf("I! Available plugins: %d inputs, %d aggregators, %d processors, %d parsers, %d outputs, %d secret-stores",
		len(inputs.Inputs),
		len(aggregators.Aggregators),
		len(processors.Processors),
		len(parsers.Parsers),
		len(outputs.Outputs),
		len(secretstores.SecretStores),
	)
	log.Printf("I! Loaded inputs: %s\n%s", strings.Join(c.InputNames(), " "), c.InputNamesWithSources())
	log.Printf("I! Loaded aggregators: %s\n%s", strings.Join(c.AggregatorNames(), " "), c.AggregatorNamesWithSources())
	log.Printf("I! Loaded processors: %s\n%s", strings.Join(c.ProcessorNames(), " "), c.ProcessorNamesWithSources())
	log.Printf("I! Loaded secretstores: %s\n%s", strings.Join(c.SecretstoreNames(), " "), c.SecretstoreNamesWithSources())
	if !t.once && (t.test || t.testWait != 0) {
		log.Print("W! " + color.RedString("Outputs are not used in testing mode!"))
	} else {
		log.Printf("I! Loaded outputs: %s\n%s", strings.Join(c.OutputNames(), " "), c.OutputNamesWithSources())
	}
	log.Printf("I! Tags enabled: %s", c.ListTags())

	// Warn about deprecations per category; counts are
	// [deprecated plugins, deprecated options].
	if count, found := c.Deprecations["inputs"]; found && (count[0] > 0 || count[1] > 0) {
		log.Printf("W! Deprecated inputs: %d and %d options", count[0], count[1])
	}
	if count, found := c.Deprecations["aggregators"]; found && (count[0] > 0 || count[1] > 0) {
		log.Printf("W! Deprecated aggregators: %d and %d options", count[0], count[1])
	}
	if count, found := c.Deprecations["processors"]; found && (count[0] > 0 || count[1] > 0) {
		log.Printf("W! Deprecated processors: %d and %d options", count[0], count[1])
	}
	if count, found := c.Deprecations["outputs"]; found && (count[0] > 0 || count[1] > 0) {
		log.Printf("W! Deprecated outputs: %d and %d options", count[0], count[1])
	}
	if count, found := c.Deprecations["secretstores"]; found && (count[0] > 0 || count[1] > 0) {
		log.Printf("W! Deprecated secretstores: %d and %d options", count[0], count[1])
	}

	// Compute the amount of locked memory needed for the secrets
	if !t.GlobalFlags.unprotected {
		required := 3 * c.NumberSecrets * uint64(os.Getpagesize())
		available := getLockedMemoryLimit()
		if required > available {
			required /= 1024
			available /= 1024
			log.Printf("I! Found %d secrets...", c.NumberSecrets)
			msg := fmt.Sprintf("Insufficient lockable memory %dkb when %dkb is required.", available, required)
			msg += " Please increase the limit for Telegraf in your Operating System!"
			log.Print("W! " + color.RedString(msg))
		}
	}

	ag := agent.NewAgent(c)

	// Notify systemd that telegraf is ready
	// SdNotify() only tries to notify if the NOTIFY_SOCKET environment is set, so it's safe to call when systemd isn't present.
	// Ignore the return values here because they're not valid for platforms that don't use systemd.
	// For platforms that use systemd, telegraf doesn't log if the notification failed.
	//nolint:errcheck // see above
	daemon.SdNotify(false, daemon.SdNotifyReady)

	if t.once {
		wait := time.Duration(t.testWait) * time.Second
		return ag.Once(ctx, wait)
	}

	if t.test || t.testWait != 0 {
		wait := time.Duration(t.testWait) * time.Second
		return ag.Test(ctx, wait)
	}

	// Write the PID file if requested; it is removed again on shutdown.
	if t.pidFile != "" {
		f, err := os.OpenFile(t.pidFile, os.O_CREATE|os.O_WRONLY, 0640)
		if err != nil {
			log.Printf("E! Unable to create pidfile: %s", err)
		} else {
			fmt.Fprintf(f, "%d\n", os.Getpid())

			err = f.Close()
			if err != nil {
				return err
			}

			defer func() {
				err := os.Remove(t.pidFile)
				if err != nil {
					log.Printf("E! Unable to remove pidfile: %s", err)
				}
			}()
		}
	}

	return ag.Run(ctx)
}
// isURL reports whether str parses as an absolute URL, i.e. it has both a
// scheme and a host.
func isURL(str string) bool {
	parsed, err := url.Parse(str)
	if err != nil {
		return false
	}
	return parsed.Scheme != "" && parsed.Host != ""
}

View file

@ -0,0 +1,42 @@
//go:build !windows
package main
import (
"log"
"runtime"
"syscall"
)
// Run starts Telegraf on non-Windows platforms: it initializes the global
// stop channel, loads the configuration and enters the reload loop until
// the agent terminates.
func (t *Telegraf) Run() error {
	stop = make(chan struct{})
	defer close(stop)

	// Load the configuration file(s) before entering the loop.
	cfg, err := t.loadConfiguration()
	if err != nil {
		return err
	}
	t.cfg = cfg

	return t.reloadLoop()
}
// getLockedMemoryLimit returns the hard limit (in bytes) for memory that can
// be locked into RAM, as reported by getrlimit(RLIMIT_MEMLOCK). It returns 0
// if the limit cannot be queried.
func getLockedMemoryLimit() uint64 {
	// The numeric value of RLIMIT_MEMLOCK differs between the BSDs and
	// other platforms, so resolve it per operating system.
	// Linux value: https://elixir.bootlin.com/linux/latest/source/include/uapi/asm-generic/resource.h#L35
	resource := 8
	switch runtime.GOOS {
	case "dragonfly", "freebsd", "netbsd", "openbsd":
		// From https://cgit.freebsd.org/src/tree/sys/sys/resource.h#n107
		resource = 6
	}

	var limit syscall.Rlimit
	if err := syscall.Getrlimit(resource, &limit); err != nil {
		log.Printf("E! Cannot get limit for locked memory: %v", err)
		return 0
	}
	//nolint:unconvert // required for e.g. FreeBSD that has the field as int64
	return uint64(limit.Max)
}

View file

@ -0,0 +1,408 @@
//go:build windows
//go:generate ../../scripts/windows-gen-syso.sh $GOARCH
package main
import (
"fmt"
"log"
"os"
"path/filepath"
"syscall"
"time"
"golang.org/x/sys/windows"
"golang.org/x/sys/windows/svc"
"golang.org/x/sys/windows/svc/eventlog"
"golang.org/x/sys/windows/svc/mgr"
)
// getLockedMemoryLimit returns the maximum working-set size (in bytes) of
// the current process, queried via GetProcessWorkingSetSizeEx; this value is
// used as the lockable-memory limit on Windows.
func getLockedMemoryLimit() uint64 {
	handle := windows.CurrentProcess()
	// low and high receive the minimum and maximum working-set sizes.
	var low, high uintptr
	var flag uint32
	windows.GetProcessWorkingSetSizeEx(handle, &low, &high, &flag)
	return uint64(high)
}
// Run starts Telegraf on Windows. It first handles the deprecated --service
// management commands (install/uninstall/start/stop/status), then either
// runs under the Windows service framework (when launched by the service
// manager and --console is not set) or falls back to console mode.
func (t *Telegraf) Run() error {
	// Process the service commands
	if t.service != "" {
		fmt.Println("The use of --service is deprecated, please use the 'service' command instead!")
		switch t.service {
		case "install":
			cfg := &serviceConfig{
				displayName:  t.serviceDisplayName,
				restartDelay: t.serviceRestartDelay,
				autoRestart:  t.serviceAutoRestart,
				configs:      t.config,
				configDirs:   t.configDir,
				watchConfig:  t.watchConfig,
			}
			if err := installService(t.serviceName, cfg); err != nil {
				return err
			}
			fmt.Printf("Successfully installed service %q\n", t.serviceName)
		case "uninstall":
			if err := uninstallService(t.serviceName); err != nil {
				return err
			}
			fmt.Printf("Successfully uninstalled service %q\n", t.serviceName)
		case "start":
			if err := startService(t.serviceName); err != nil {
				return err
			}
			fmt.Printf("Successfully started service %q\n", t.serviceName)
		case "stop":
			if err := stopService(t.serviceName); err != nil {
				return err
			}
			fmt.Printf("Successfully stopped service %q\n", t.serviceName)
		case "status":
			status, err := queryService(t.serviceName)
			if err != nil {
				return err
			}
			fmt.Printf("Service %q is in %q state\n", t.serviceName, status)
		default:
			return fmt.Errorf("invalid service command %q", t.service)
		}
		return nil
	}

	// Determine if Telegraf is started as a Windows service.
	isWinService, err := svc.IsWindowsService()
	if err != nil {
		return fmt.Errorf("cannot determine if run as Windows service: %w", err)
	}
	if !t.console && isWinService {
		// Hand control to the service framework; it calls t.Execute.
		return svc.Run(t.serviceName, t)
	}

	// Console mode: load the configuration file(s) and run directly.
	cfg, err := t.loadConfiguration()
	if err != nil {
		return err
	}
	t.cfg = cfg

	stop = make(chan struct{})
	defer close(stop)
	return t.reloadLoop()
}
// Execute implements svc.Handler for the Windows service framework. It
// drives the agent through the service state machine (start pending ->
// running -> stop pending -> stopped). The boolean return marks a
// service-specific failure; the uint32 is the exit code reported to the
// service manager.
func (t *Telegraf) Execute(_ []string, r <-chan svc.ChangeRequest, changes chan<- svc.Status) (bool, uint32) {
	// Mark the status as startup pending until we are fully started
	const accepted = svc.AcceptStop | svc.AcceptShutdown
	changes <- svc.Status{State: svc.StartPending}
	defer func() {
		changes <- svc.Status{State: svc.Stopped}
	}()

	// Create a eventlog logger for all service related things
	svclog, err := eventlog.Open(t.serviceName)
	if err != nil {
		log.Printf("E! Initializing the service logger failed: %s", err)
		return true, 1
	}
	defer svclog.Close()

	// Load the configuration file(s)
	cfg, err := t.loadConfiguration()
	if err != nil {
		if lerr := svclog.Error(100, err.Error()); lerr != nil {
			log.Printf("E! Logging error %q failed: %s", err, lerr)
		}
		return true, 2
	}
	t.cfg = cfg

	// Actually start the processing loop in the background to be able to
	// react to service change requests
	loopErr := make(chan error)
	stop = make(chan struct{})
	defer close(loopErr)
	defer close(stop)
	go func() {
		loopErr <- t.reloadLoop()
	}()
	changes <- svc.Status{State: svc.Running, Accepts: accepted}

	// Serve service change requests until the loop finishes or a
	// stop/shutdown request arrives.
	for {
		select {
		case err := <-loopErr:
			if err != nil {
				if lerr := svclog.Error(100, err.Error()); lerr != nil {
					log.Printf("E! Logging error %q failed: %s", err, lerr)
				}
				return true, 3
			}
			return false, 0
		case c := <-r:
			switch c.Cmd {
			case svc.Interrogate:
				changes <- c.CurrentStatus
				// Testing deadlock from https://code.google.com/p/winsvc/issues/detail?id=4
				time.Sleep(100 * time.Millisecond)
				changes <- c.CurrentStatus
			case svc.Stop, svc.Shutdown:
				changes <- svc.Status{State: svc.StopPending}
				var empty struct{}
				stop <- empty // signal reloadLoop to finish (context cancel)
			default:
				msg := fmt.Sprintf("Unexpected control request #%d", c)
				if lerr := svclog.Error(100, msg); lerr != nil {
					log.Printf("E! Logging error %q failed: %s", msg, lerr)
				}
			}
		}
	}
}
// serviceConfig bundles all settings needed to install Telegraf as a
// Windows service.
type serviceConfig struct {
	displayName  string // display name shown in the service manager
	restartDelay string // restart delay (parsed as time.Duration) when autoRestart is set
	autoRestart  bool   // configure an automatic restart recovery action

	// Telegraf parameters passed on the service command line
	configs     []string // --config arguments
	configDirs  []string // --config-directory arguments
	watchConfig string   // --watch-config argument, if set
}
// installService installs Telegraf as a Windows service named name using the
// settings in cfg, and registers a matching eventlog source. Installation
// fails if a service with that name already exists.
func installService(name string, cfg *serviceConfig) error {
	// Determine the executable to use in the service
	executable, err := os.Executable()
	if err != nil {
		return fmt.Errorf("determining executable failed: %w", err)
	}

	// Determine the program files directory name
	programFiles := os.Getenv("ProgramFiles")
	if programFiles == "" { // Should never happen
		programFiles = "C:\\Program Files"
	}

	// Collect the command line arguments
	args := make([]string, 0, 2*(len(cfg.configs)+len(cfg.configDirs))+2)
	for _, fn := range cfg.configs {
		args = append(args, "--config", fn)
	}
	for _, dn := range cfg.configDirs {
		args = append(args, "--config-directory", dn)
	}
	// Fall back to the default config location when nothing was specified.
	if len(args) == 0 {
		args = append(args, "--config", filepath.Join(programFiles, "Telegraf", "telegraf.conf"))
	}
	if cfg.watchConfig != "" {
		args = append(args, "--watch-config", cfg.watchConfig)
	}
	// Pass the service name to the command line, to have a custom name when relaunching as a service
	args = append(args, "--service-name", name)

	// Create a configuration for the service
	svccfg := mgr.Config{
		DisplayName: cfg.displayName,
		Description: "Collects, processes and publishes data using a series of plugins.",
		StartType:   mgr.StartAutomatic,
		ServiceType: windows.SERVICE_WIN32_OWN_PROCESS,
	}

	// Connect to the service manager and try to install the service if it
	// doesn't exist. Fail on existing service and stop installation.
	svcmgr, err := mgr.Connect()
	if err != nil {
		return fmt.Errorf("connecting to service manager failed: %w", err)
	}
	defer svcmgr.Disconnect()

	if service, err := svcmgr.OpenService(name); err == nil {
		service.Close()
		return fmt.Errorf("service %q is already installed", name)
	}

	service, err := svcmgr.CreateService(name, executable, svccfg, args...)
	if err != nil {
		return fmt.Errorf("creating service failed: %w", err)
	}
	defer service.Close()

	// Set the recovery strategy to restart with a fixed period of 10 seconds
	// and the user specified delay if requested
	if cfg.autoRestart {
		delay, err := time.ParseDuration(cfg.restartDelay)
		if err != nil {
			return fmt.Errorf("cannot parse restart delay %q: %w", cfg.restartDelay, err)
		}
		recovery := []mgr.RecoveryAction{{Type: mgr.ServiceRestart, Delay: delay}}
		if err := service.SetRecoveryActions(recovery, 10); err != nil {
			return err
		}
	}

	// Register the event as a source of eventlog events
	events := uint32(eventlog.Error | eventlog.Warning | eventlog.Info)
	if err := eventlog.InstallAsEventCreate(name, events); err != nil {
		//nolint:errcheck // Try to remove the service on best effort basis as we cannot handle any error here
		service.Delete()
		return fmt.Errorf("setting up eventlog source failed: %w", err)
	}

	return nil
}
// uninstallService removes the Windows service with the given name and its
// associated eventlog source. It fails if the service is not installed.
func uninstallService(name string) error {
	// Connect to the service manager and open the service; an error here
	// means the service is not installed.
	manager, err := mgr.Connect()
	if err != nil {
		return fmt.Errorf("connecting to service manager failed: %w", err)
	}
	defer manager.Disconnect()

	s, err := manager.OpenService(name)
	if err != nil {
		return fmt.Errorf("opening service failed: %w", err)
	}
	defer s.Close()

	// Remove the service first, then its eventlog source.
	if err := s.Delete(); err != nil {
		return fmt.Errorf("uninstalling service failed: %w", err)
	}
	if err := eventlog.Remove(name); err != nil {
		return fmt.Errorf("removing eventlog source failed: %w", err)
	}
	return nil
}
// startService starts the Windows service with the given name. The service
// must currently be in the stopped state.
func startService(name string) error {
	nameUTF16, err := syscall.UTF16PtrFromString(name)
	if err != nil {
		return fmt.Errorf("conversion of service name %q to UTF16 failed: %w", name, err)
	}

	// Open the service manager and the service with the least privileges
	// required to start the service.
	mgrHandle, err := windows.OpenSCManager(nil, nil, windows.SC_MANAGER_CONNECT|windows.SC_MANAGER_ENUMERATE_SERVICE)
	if err != nil {
		return fmt.Errorf("opening service manager failed: %w", err)
	}
	defer windows.CloseServiceHandle(mgrHandle)

	svcHandle, err := windows.OpenService(mgrHandle, nameUTF16, windows.SERVICE_QUERY_STATUS|windows.SERVICE_START)
	if err != nil {
		return fmt.Errorf("opening service failed: %w", err)
	}
	s := &mgr.Service{Handle: svcHandle, Name: name}
	defer s.Close()

	// Starting is only valid from the stopped state.
	status, err := s.Query()
	if err != nil {
		return fmt.Errorf("querying service state failed: %w", err)
	}
	if status.State != svc.Stopped {
		return fmt.Errorf("service is not stopped but in state %q", stateDescription(status.State))
	}
	return s.Start()
}
// stopService stops the Windows service with the given name and blocks until
// the service manager reports it as stopped.
func stopService(name string) error {
	nameUTF16, err := syscall.UTF16PtrFromString(name)
	if err != nil {
		return fmt.Errorf("conversion of service name %q to UTF16 failed: %w", name, err)
	}

	// Open the service manager and the service with the least privileges
	// required to stop the service.
	mgrHandle, err := windows.OpenSCManager(nil, nil, windows.SC_MANAGER_CONNECT|windows.SC_MANAGER_ENUMERATE_SERVICE)
	if err != nil {
		return fmt.Errorf("opening service manager failed: %w", err)
	}
	defer windows.CloseServiceHandle(mgrHandle)

	svcHandle, err := windows.OpenService(mgrHandle, nameUTF16, windows.SERVICE_QUERY_STATUS|windows.SERVICE_STOP)
	if err != nil {
		return fmt.Errorf("opening service failed: %w", err)
	}
	s := &mgr.Service{Handle: svcHandle, Name: name}
	defer s.Close()

	// Request the stop, then poll until the service reaches the stopped
	// state. The service's wait hint is honored but clamped to the range
	// [100ms, 10s] to stay responsive without busy-waiting.
	status, err := s.Control(svc.Stop)
	if err != nil {
		return fmt.Errorf("stopping service failed: %w", err)
	}
	for status.State != svc.Stopped {
		wait := time.Duration(status.WaitHint) * time.Millisecond
		switch {
		case wait < 100*time.Millisecond:
			wait = 100 * time.Millisecond
		case wait > 10*time.Second:
			wait = 10 * time.Second
		}
		time.Sleep(wait)

		if status, err = s.Query(); err != nil {
			return fmt.Errorf("querying service state failed: %w", err)
		}
	}
	return nil
}
// queryService returns a human-readable description of the current state of
// the Windows service with the given name.
func queryService(name string) (string, error) {
	nameUTF16, err := syscall.UTF16PtrFromString(name)
	if err != nil {
		return "", fmt.Errorf("conversion of service name %q to UTF16 failed: %w", name, err)
	}

	// Open the service manager and the service with query permission only.
	mgrHandle, err := windows.OpenSCManager(nil, nil, windows.SC_MANAGER_CONNECT|windows.SC_MANAGER_ENUMERATE_SERVICE)
	if err != nil {
		return "", fmt.Errorf("opening service manager failed: %w", err)
	}
	defer windows.CloseServiceHandle(mgrHandle)

	svcHandle, err := windows.OpenService(mgrHandle, nameUTF16, windows.SERVICE_QUERY_STATUS)
	if err != nil {
		return "", fmt.Errorf("opening service failed: %w", err)
	}
	s := &mgr.Service{Handle: svcHandle, Name: name}
	defer s.Close()

	// Query the service state and translate it for the user.
	status, err := s.Query()
	if err != nil {
		return "", fmt.Errorf("querying service state failed: %w", err)
	}
	return stateDescription(status.State), nil
}
// stateDescription translates a Windows service state constant into a
// human-readable string; unknown states are rendered as "unknown <value>".
func stateDescription(state svc.State) string {
	descriptions := map[svc.State]string{
		svc.Stopped:         "stopped",
		svc.StartPending:    "start pending",
		svc.StopPending:     "stop pending",
		svc.Running:         "running",
		svc.ContinuePending: "continue pending",
		svc.PausePending:    "pause pending",
		svc.Paused:          "paused",
	}
	if description, known := descriptions[state]; known {
		return description
	}
	return fmt.Sprintf("unknown %v", state)
}

1
config/README.md Symbolic link
View file

@ -0,0 +1 @@
../docs/CONFIGURATION.md

1924
config/config.go Normal file

File diff suppressed because it is too large Load diff

1540
config/config_test.go Normal file

File diff suppressed because it is too large Load diff

393
config/deprecation.go Normal file
View file

@ -0,0 +1,393 @@
package config
import (
"errors"
"fmt"
"log"
"reflect"
"sort"
"strings"
"github.com/coreos/go-semver/semver"
"github.com/fatih/color"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/aggregators"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf/plugins/processors"
)
// DeprecationInfo contains all important information to describe a deprecated entity
type DeprecationInfo struct {
	// Name of the plugin or plugin option
	Name string
	// LogLevel is the level of deprecation which currently corresponds to a log-level
	logLevel telegraf.LogLevel
	// info holds the deprecation details: since/removal version and notice text
	info telegraf.DeprecationInfo
}
// determineEscalation computes the deprecation log level from the current
// Telegraf version: Error once the removal version is reached, Warn once the
// since version is reached, None otherwise. When no removal version is
// given, it defaults to the next major release after 'since' and the
// computed value is stored back into the info.
func (di *DeprecationInfo) determineEscalation() error {
	di.logLevel = telegraf.None
	// Entities without a 'since' version are not deprecated at all.
	if di.info.Since == "" {
		return nil
	}

	since, err := semver.NewVersion(di.info.Since)
	if err != nil {
		return fmt.Errorf("cannot parse 'since' version %q: %w", di.info.Since, err)
	}

	var removal *semver.Version
	if di.info.RemovalIn != "" {
		removal, err = semver.NewVersion(di.info.RemovalIn)
		if err != nil {
			return fmt.Errorf("cannot parse 'removal' version %q: %w", di.info.RemovalIn, err)
		}
	} else {
		// No explicit removal version; assume removal with the next major release.
		removal = &semver.Version{Major: since.Major}
		removal.BumpMajor()
		di.info.RemovalIn = removal.String()
	}

	// Drop potential pre-release tags
	version := semver.Version{
		Major: telegrafVersion.Major,
		Minor: telegrafVersion.Minor,
		Patch: telegrafVersion.Patch,
	}
	if !version.LessThan(*removal) {
		di.logLevel = telegraf.Error
	} else if !version.LessThan(*since) {
		di.logLevel = telegraf.Warn
	}
	return nil
}
// PluginDeprecationInfo holds all information about a deprecated plugin or it's options
type PluginDeprecationInfo struct {
	DeprecationInfo

	// Options deprecated for this plugin
	Options []DeprecationInfo
}
// incrementPluginDeprecations bumps the deprecated-plugin counter (index 0)
// for the given category; index 1 counts deprecated options and is left
// untouched.
func (c *Config) incrementPluginDeprecations(category string) {
	counts, found := c.Deprecations[category]
	if !found {
		counts = []int64{0, 0}
	}
	c.Deprecations[category] = []int64{counts[0] + 1, counts[1]}
}
// incrementPluginOptionDeprecations bumps the deprecated-option counter
// (index 1) for the given category; index 0 counts deprecated plugins and
// is left untouched.
func (c *Config) incrementPluginOptionDeprecations(category string) {
	counts, found := c.Deprecations[category]
	if !found {
		counts = []int64{0, 0}
	}
	c.Deprecations[category] = []int64{counts[0], counts[1] + 1}
}
// collectDeprecationInfo gathers the deprecation state of the given plugin
// instance: whether the plugin itself is deprecated (looked up in the
// per-category registry) and which of its options carry a `deprecated`
// struct tag. The tag format is "since", "since;notice" or
// "since;removal_version;notice". With all=false only options that are
// actually set (non-zero) are reported. The per-category deprecation
// counters on the config are incremented as a side effect.
func (c *Config) collectDeprecationInfo(category, name string, plugin interface{}, all bool) PluginDeprecationInfo {
	info := PluginDeprecationInfo{
		DeprecationInfo: DeprecationInfo{
			Name:     category + "." + name,
			logLevel: telegraf.None,
		},
	}

	// First check if the whole plugin is deprecated
	switch category {
	case "aggregators":
		if pi, deprecated := aggregators.Deprecations[name]; deprecated {
			info.DeprecationInfo.info = pi
		}
	case "inputs":
		if pi, deprecated := inputs.Deprecations[name]; deprecated {
			info.DeprecationInfo.info = pi
		}
	case "outputs":
		if pi, deprecated := outputs.Deprecations[name]; deprecated {
			info.DeprecationInfo.info = pi
		}
	case "processors":
		if pi, deprecated := processors.Deprecations[name]; deprecated {
			info.DeprecationInfo.info = pi
		}
	}
	if err := info.determineEscalation(); err != nil {
		panic(fmt.Errorf("plugin %q: %w", info.Name, err))
	}
	if info.logLevel != telegraf.None {
		c.incrementPluginDeprecations(category)
	}

	// Allow checking for names only.
	if plugin == nil {
		return info
	}

	// Check for deprecated options
	walkPluginStruct(reflect.ValueOf(plugin), func(field reflect.StructField, value reflect.Value) {
		// Try to report only those fields that are set
		if !all && value.IsZero() {
			return
		}

		// Split the tag into at most three parts:
		// "since", "since;notice" or "since;removal;notice".
		tags := strings.SplitN(field.Tag.Get("deprecated"), ";", 3)
		if len(tags) < 1 || tags[0] == "" {
			return
		}
		optionInfo := DeprecationInfo{Name: field.Name}
		optionInfo.info.Since = tags[0]

		// The last part is always the notice; the middle part (when three
		// parts are present) is the removal version.
		if len(tags) > 1 {
			optionInfo.info.Notice = tags[len(tags)-1]
		}
		if len(tags) > 2 {
			optionInfo.info.RemovalIn = tags[1]
		}
		if err := optionInfo.determineEscalation(); err != nil {
			panic(fmt.Errorf("plugin %q option %q: %w", info.Name, field.Name, err))
		}
		if optionInfo.logLevel != telegraf.None {
			c.incrementPluginOptionDeprecations(category)
		}

		// Get the toml field name
		option := field.Tag.Get("toml")
		if option != "" {
			optionInfo.Name = option
		}
		info.Options = append(info.Options, optionInfo)
	})

	return info
}
// printUserDeprecation logs deprecation notices for the given configured
// plugin instance and its set options. It returns an error if the plugin
// itself or any used option has reached the 'error' deprecation stage.
func (c *Config) printUserDeprecation(category, name string, plugin interface{}) error {
	info := c.collectDeprecationInfo(category, name, plugin, false)

	printPluginDeprecationNotice(info.logLevel, info.Name, info.info)
	if info.logLevel == telegraf.Error {
		return errors.New("plugin deprecated")
	}

	// Print deprecated options
	deprecatedOptions := make([]string, 0)
	for _, option := range info.Options {
		PrintOptionDeprecationNotice(info.Name, option.Name, option.info)
		if option.logLevel == telegraf.Error {
			deprecatedOptions = append(deprecatedOptions, option.Name)
		}
	}

	if len(deprecatedOptions) > 0 {
		return fmt.Errorf("plugin options %q deprecated", strings.Join(deprecatedOptions, ","))
	}
	return nil
}
// CollectDeprecationInfos returns the deprecation state of all registered
// plugins per category ("inputs", "outputs", "processors", "aggregators"),
// optionally restricted by the given per-category name filters. Plugins are
// instantiated via their registered creators so that deprecated options can
// be inspected as well.
func (c *Config) CollectDeprecationInfos(inFilter, outFilter, aggFilter, procFilter []string) map[string][]PluginDeprecationInfo {
	infos := make(map[string][]PluginDeprecationInfo)

	infos["inputs"] = make([]PluginDeprecationInfo, 0)
	for name, creator := range inputs.Inputs {
		if len(inFilter) > 0 && !sliceContains(name, inFilter) {
			continue
		}

		plugin := creator()
		info := c.collectDeprecationInfo("inputs", name, plugin, true)

		// Only keep plugins that are deprecated themselves or have
		// deprecated options
		if info.logLevel != telegraf.None || len(info.Options) > 0 {
			infos["inputs"] = append(infos["inputs"], info)
		}
	}

	infos["outputs"] = make([]PluginDeprecationInfo, 0)
	for name, creator := range outputs.Outputs {
		if len(outFilter) > 0 && !sliceContains(name, outFilter) {
			continue
		}

		plugin := creator()
		info := c.collectDeprecationInfo("outputs", name, plugin, true)

		if info.logLevel != telegraf.None || len(info.Options) > 0 {
			infos["outputs"] = append(infos["outputs"], info)
		}
	}

	infos["processors"] = make([]PluginDeprecationInfo, 0)
	for name, creator := range processors.Processors {
		if len(procFilter) > 0 && !sliceContains(name, procFilter) {
			continue
		}

		plugin := creator()
		info := c.collectDeprecationInfo("processors", name, plugin, true)

		if info.logLevel != telegraf.None || len(info.Options) > 0 {
			infos["processors"] = append(infos["processors"], info)
		}
	}

	infos["aggregators"] = make([]PluginDeprecationInfo, 0)
	for name, creator := range aggregators.Aggregators {
		if len(aggFilter) > 0 && !sliceContains(name, aggFilter) {
			continue
		}

		plugin := creator()
		info := c.collectDeprecationInfo("aggregators", name, plugin, true)

		if info.logLevel != telegraf.None || len(info.Options) > 0 {
			infos["aggregators"] = append(infos["aggregators"], info)
		}
	}

	return infos
}
// PrintDeprecationList prints the given plugin deprecation information in a
// tabular format: one line per deprecated plugin, followed by one line per
// deprecated option of that plugin. Entries are sorted by name.
func (*Config) PrintDeprecationList(plugins []PluginDeprecationInfo) {
	sort.Slice(plugins, func(i, j int) bool { return plugins[i].Name < plugins[j].Name })

	for _, p := range plugins {
		// Only announce plugins that are actually deprecated themselves
		switch p.logLevel {
		case telegraf.Warn, telegraf.Error:
			fmt.Printf(
				" %-40s %-5s since %-5s removal in %-5s %s\n",
				p.Name, p.logLevel, p.info.Since, p.info.RemovalIn, p.info.Notice,
			)
		}

		if len(p.Options) < 1 {
			continue
		}

		sort.Slice(p.Options, func(i, j int) bool { return p.Options[i].Name < p.Options[j].Name })
		for _, o := range p.Options {
			fmt.Printf(
				" %-40s %-5s since %-5s removal in %-5s %s\n",
				p.Name+"/"+o.Name, o.logLevel, o.info.Since, o.info.RemovalIn, o.info.Notice,
			)
		}
	}
}
// printHistoricPluginDeprecationNotice logs an error for plugins that were
// deprecated and have already been removed from telegraf.
func printHistoricPluginDeprecationNotice(category, name string, info telegraf.DeprecationInfo) {
	plugin := category + "." + name
	log.Printf(
		"%s: Plugin %q deprecated since version %s and removed: %s",
		"E! "+color.RedString("DeprecationError"), plugin, info.Since, info.Notice,
	)
}
// walkPluginStruct iterates over the fields of a structure in depth-first search (to cover nested structures)
// and calls the given function for every visited field.
func walkPluginStruct(value reflect.Value, fn func(f reflect.StructField, fv reflect.Value)) {
	v := reflect.Indirect(value)
	t := v.Type()

	// Only works on structs
	if t.Kind() != reflect.Struct {
		return
	}

	// Walk over the struct fields and call the given function. If we encounter more complex embedded
	// elements (structs, slices/arrays, maps) we need to descend into those elements as they might
	// contain structures nested in the current structure.
	for i := 0; i < t.NumField(); i++ {
		field := t.Field(i)
		fieldValue := v.Field(i)

		// Skip unexported fields; a non-empty PkgPath marks them as such
		if field.PkgPath != "" {
			continue
		}
		switch field.Type.Kind() {
		case reflect.Struct:
			walkPluginStruct(fieldValue, fn)
		case reflect.Array, reflect.Slice:
			for j := 0; j < fieldValue.Len(); j++ {
				element := fieldValue.Index(j)
				// The array might contain structs
				walkPluginStruct(element, fn)
				// NOTE(review): fn is called once per element here AND once
				// for the whole field below, so container fields are visited
				// multiple times — confirm this is intended by the callers.
				fn(field, element)
			}
		case reflect.Map:
			iter := fieldValue.MapRange()
			for iter.Next() {
				element := iter.Value()
				// The map might contain structs
				walkPluginStruct(element, fn)
				fn(field, element)
			}
		}
		fn(field, fieldValue)
	}
}
// deprecationPrefix returns the colored log prefix for the given deprecation
// level; levels other than warn and error yield an empty prefix.
func deprecationPrefix(level telegraf.LogLevel) string {
	if level == telegraf.Warn {
		return "W! " + color.YellowString("DeprecationWarning")
	}
	if level == telegraf.Error {
		return "E! " + color.RedString("DeprecationError")
	}
	return ""
}
// printPluginDeprecationNotice logs a deprecation message for the named
// plugin if the given level is warn or error; otherwise nothing is logged.
func printPluginDeprecationNotice(level telegraf.LogLevel, name string, info telegraf.DeprecationInfo) {
	if level != telegraf.Warn && level != telegraf.Error {
		return
	}
	log.Printf(
		"%s: Plugin %q deprecated since version %s and will be removed in %s: %s",
		deprecationPrefix(level), name, info.Since, info.RemovalIn, info.Notice,
	)
}
// PrintOptionDeprecationNotice logs a deprecation message for the given
// option of a plugin if the deprecation is effective for the running version.
func PrintOptionDeprecationNotice(plugin, option string, info telegraf.DeprecationInfo) {
	// Determine the log-level
	di := &DeprecationInfo{Name: plugin, info: info}
	if err := di.determineEscalation(); err != nil {
		log.Printf("E! Determining log-level for option %s in plugin %s failed: %v", option, plugin, err)
		return
	}

	if di.logLevel != telegraf.Warn && di.logLevel != telegraf.Error {
		return
	}
	log.Printf(
		"%s: Option %q of plugin %q deprecated since version %s and will be removed in %s: %s",
		deprecationPrefix(di.logLevel), option, plugin, di.info.Since, di.info.RemovalIn, di.info.Notice,
	)
}
// PrintOptionValueDeprecationNotice logs a deprecation message for the given
// value of a plugin option if the deprecation is effective for the running
// version.
func PrintOptionValueDeprecationNotice(plugin, option string, value interface{}, info telegraf.DeprecationInfo) {
	// Determine the log-level
	di := &DeprecationInfo{Name: plugin, info: info}
	if err := di.determineEscalation(); err != nil {
		log.Printf("E! Determining log-level for option %s in plugin %s failed: %v", option, plugin, err)
		return
	}

	if di.logLevel != telegraf.Warn && di.logLevel != telegraf.Error {
		return
	}
	log.Printf(
		`%s: Value "%+v" for option %q of plugin %q deprecated since version %s and will be removed in %s: %s`,
		deprecationPrefix(di.logLevel), value, option, plugin, di.info.Since, di.info.RemovalIn, di.info.Notice,
	)
}

277
config/deprecation_test.go Normal file
View file

@ -0,0 +1,277 @@
package config
import (
"bufio"
"bytes"
"log"
"strings"
"testing"
"time"
"github.com/coreos/go-semver/semver"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf"
)
// TestPluginDeprecation verifies that printPluginDeprecationNotice logs the
// expected message for warn/error levels and stays silent for level None.
func TestPluginDeprecation(t *testing.T) {
	info := telegraf.DeprecationInfo{
		Since:     "1.23.0",
		RemovalIn: "2.0.0",
		Notice:    "please check",
	}

	var tests = []struct {
		name     string
		level    telegraf.LogLevel
		expected string
	}{
		{
			name:     "Error level",
			level:    telegraf.Error,
			expected: `Plugin "test" deprecated since version 1.23.0 and will be removed in 2.0.0: please check`,
		},
		{
			name:     "Warn level",
			level:    telegraf.Warn,
			expected: `Plugin "test" deprecated since version 1.23.0 and will be removed in 2.0.0: please check`,
		},
		{
			name:     "None",
			level:    telegraf.None,
			expected: ``,
		},
	}

	// Switch the logger to log to a buffer
	var buf bytes.Buffer
	scanner := bufio.NewScanner(&buf)

	previous := log.Writer()
	log.SetOutput(&buf)
	defer log.SetOutput(previous)

	msg := make(chan string, 1)
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			buf.Reset()
			printPluginDeprecationNotice(tt.level, "test", info)

			// Wait for a newline to arrive and timeout for cases where
			// we don't see a message.
			go func() {
				scanner.Scan()
				msg <- scanner.Text()
			}()

			// Reduce the timeout if we do not expect a message
			timeout := 1 * time.Second
			if tt.expected == "" {
				timeout = 100 * time.Microsecond
			}

			var actual string
			select {
			case actual = <-msg:
			case <-time.After(timeout):
			}

			if tt.expected != "" {
				// The logged line is prefixed with the colored level marker
				expected := deprecationPrefix(tt.level) + ": " + tt.expected
				require.Equal(t, expected, actual)
			} else {
				require.Empty(t, actual)
			}
		})
	}
}
// TestPluginOptionDeprecation verifies that PrintOptionDeprecationNotice
// escalates correctly against a faked telegraf version and logs the expected
// message, including the default removal version when none is given.
func TestPluginOptionDeprecation(t *testing.T) {
	var tests = []struct {
		name          string
		since         string
		removal       string
		expected      string
		expectedLevel telegraf.LogLevel
	}{
		{
			name:          "Error level",
			since:         "1.23.0",
			removal:       "1.29.0",
			expectedLevel: telegraf.Error,
			expected:      `Option "option" of plugin "test" deprecated since version 1.23.0 and will be removed in 1.29.0: please check`,
		},
		{
			name:          "Warn level",
			since:         "1.23.0",
			removal:       "2.0.0",
			expectedLevel: telegraf.Warn,
			expected:      `Option "option" of plugin "test" deprecated since version 1.23.0 and will be removed in 2.0.0: please check`,
		},
		{
			// Without removal info the next major version is assumed
			name:          "No removal info",
			since:         "1.23.0",
			expectedLevel: telegraf.Warn,
			expected:      `Option "option" of plugin "test" deprecated since version 1.23.0 and will be removed in 2.0.0: please check`,
		},
		{
			name:          "None",
			expectedLevel: telegraf.None,
			expected:      ``,
		},
	}

	// Fake telegraf's version and restore the original afterwards to avoid
	// leaking global state into other tests
	version, err := semver.NewVersion("1.30.0")
	require.NoError(t, err)
	previousVersion := telegrafVersion
	telegrafVersion = version
	defer func() { telegrafVersion = previousVersion }()

	// Switch the logger to log to a buffer
	var buf bytes.Buffer
	scanner := bufio.NewScanner(&buf)

	previous := log.Writer()
	log.SetOutput(&buf)
	defer log.SetOutput(previous)

	msg := make(chan string, 1)
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			buf.Reset()
			info := telegraf.DeprecationInfo{
				Since:     tt.since,
				RemovalIn: tt.removal,
				Notice:    "please check",
			}
			PrintOptionDeprecationNotice("test", "option", info)

			// Wait for a newline to arrive and timeout for cases where
			// we don't see a message.
			go func() {
				scanner.Scan()
				msg <- scanner.Text()
			}()

			// Reduce the timeout if we do not expect a message
			timeout := 1 * time.Second
			if tt.expected == "" {
				timeout = 100 * time.Microsecond
			}

			var actual string
			select {
			case actual = <-msg:
			case <-time.After(timeout):
			}

			if tt.expected != "" {
				// The logged line is prefixed with the colored level marker
				expected := deprecationPrefix(tt.expectedLevel) + ": " + tt.expected
				require.Equal(t, expected, actual)
			} else {
				require.Empty(t, actual)
			}
		})
	}
}
// TestPluginOptionValueDeprecation verifies that
// PrintOptionValueDeprecationNotice renders all value types (strings,
// booleans, integers, nil) and escalates correctly against a faked telegraf
// version.
func TestPluginOptionValueDeprecation(t *testing.T) {
	var tests = []struct {
		name          string
		since         string
		removal       string
		value         interface{}
		expected      string
		expectedLevel telegraf.LogLevel
	}{
		{
			name:          "Error level",
			since:         "1.25.0",
			removal:       "1.29.0",
			value:         "foobar",
			expected:      `Value "foobar" for option "option" of plugin "test" deprecated since version 1.25.0 and will be removed in 1.29.0: please check`,
			expectedLevel: telegraf.Error,
		},
		{
			name:          "Warn level",
			since:         "1.25.0",
			removal:       "2.0.0",
			value:         "foobar",
			expected:      `Value "foobar" for option "option" of plugin "test" deprecated since version 1.25.0 and will be removed in 2.0.0: please check`,
			expectedLevel: telegraf.Warn,
		},
		{
			// Without removal info the next major version is assumed
			name:          "No removal info",
			since:         "1.25.0",
			value:         "foobar",
			expected:      `Value "foobar" for option "option" of plugin "test" deprecated since version 1.25.0 and will be removed in 2.0.0: please check`,
			expectedLevel: telegraf.Warn,
		},
		{
			name:          "None",
			expected:      ``,
			expectedLevel: telegraf.None,
		},
		{
			name:          "nil value",
			since:         "1.25.0",
			removal:       "1.29.0",
			value:         nil,
			expected:      `Value "<nil>" for option "option" of plugin "test" deprecated since version 1.25.0 and will be removed in 1.29.0: please check`,
			expectedLevel: telegraf.Error,
		},
		{
			name:          "Boolean value",
			since:         "1.25.0",
			removal:       "1.29.0",
			value:         true,
			expected:      `Value "true" for option "option" of plugin "test" deprecated since version 1.25.0 and will be removed in 1.29.0: please check`,
			expectedLevel: telegraf.Error,
		},
		{
			name:          "Integer value",
			since:         "1.25.0",
			removal:       "1.29.0",
			value:         123,
			expected:      `Value "123" for option "option" of plugin "test" deprecated since version 1.25.0 and will be removed in 1.29.0: please check`,
			expectedLevel: telegraf.Error,
		},
	}

	// Fake telegraf's version and restore the original afterwards to avoid
	// leaking global state into other tests
	version, err := semver.NewVersion("1.30.0")
	require.NoError(t, err)
	previousVersion := telegrafVersion
	telegrafVersion = version
	defer func() { telegrafVersion = previousVersion }()

	// Switch the logger to log to a buffer
	var buf bytes.Buffer

	previous := log.Writer()
	log.SetOutput(&buf)
	defer log.SetOutput(previous)

	timeout := 1 * time.Second
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			buf.Reset()
			info := telegraf.DeprecationInfo{
				Since:     tt.since,
				RemovalIn: tt.removal,
				Notice:    "please check",
			}
			PrintOptionValueDeprecationNotice("test", "option", tt.value, info)

			if tt.expected != "" {
				require.Eventually(t, func() bool {
					return strings.HasSuffix(buf.String(), "\n")
				}, timeout, 100*time.Millisecond)

				// Remove the time for comparison
				actual := strings.TrimSpace(buf.String())
				expected := deprecationPrefix(tt.expectedLevel) + ": " + tt.expected
				require.Equal(t, expected, actual)
			} else {
				// Logging is synchronous here, so a short grace period is
				// enough to detect spurious output; matches the short wait
				// used by the sibling tests instead of a full 1s sleep.
				time.Sleep(100 * time.Millisecond)
				require.Empty(t, buf.String())
			}
		})
	}
}

252
config/envvar.go Normal file
View file

@ -0,0 +1,252 @@
package config
import (
"bytes"
"errors"
"io"
"os"
"strings"
"github.com/compose-spec/compose-go/template"
"github.com/compose-spec/compose-go/utils"
)
// trimmer is a small state machine that copies TOML input to output while
// dropping comments and keeping quoted strings (which may contain '#') intact.
type trimmer struct {
	input  *bytes.Reader // remaining bytes to process
	output bytes.Buffer  // comment-free result
}
// removeComments strips TOML comments from buf while leaving quoted strings
// (including '#' characters inside them) untouched.
func removeComments(buf []byte) ([]byte, error) {
	tr := &trimmer{
		input:  bytes.NewReader(buf),
		output: bytes.Buffer{},
	}
	if err := tr.process(); err != nil {
		return tr.output.Bytes(), err
	}
	return tr.output.Bytes(), nil
}
// process scans the input byte-by-byte and dispatches to the handlers for
// escape sequences, single/double (and triple) quoted strings and comments;
// all other bytes are copied verbatim to the output. EOF terminates
// processing cleanly.
func (t *trimmer) process() error {
	for {
		// Read the next byte until EOF
		c, err := t.input.ReadByte()
		if err != nil {
			if errors.Is(err, io.EOF) {
				break
			}
			return err
		}

		// Switch states if we need to
		switch c {
		case '\\':
			//nolint:errcheck // next byte is known
			t.input.UnreadByte()
			err = t.escape()
		case '\'':
			//nolint:errcheck // next byte is known
			t.input.UnreadByte()
			// Three consecutive quotes start a multi-line literal string
			if t.hasNQuotes(c, 3) {
				err = t.tripleSingleQuote()
			} else {
				err = t.singleQuote()
			}
		case '"':
			//nolint:errcheck // next byte is known
			t.input.UnreadByte()
			// Three consecutive quotes start a multi-line basic string
			if t.hasNQuotes(c, 3) {
				err = t.tripleDoubleQuote()
			} else {
				err = t.doubleQuote()
			}
		case '#':
			err = t.comment()
		default:
			t.output.WriteByte(c)
			continue
		}
		if err != nil {
			if errors.Is(err, io.EOF) {
				break
			}
			return err
		}
	}
	return nil
}
// hasNQuotes performs a look-ahead and reports whether at least `limit`
// consecutive occurrences of `ref` follow at the current read position.
// The read position is restored before returning.
func (t *trimmer) hasNQuotes(ref byte, limit int64) bool {
	var count int64

	// Look ahead check if the next characters are what we expect
	for count = 0; count < limit; count++ {
		c, err := t.input.ReadByte()
		if err != nil || c != ref {
			break
		}
	}

	// We also need to unread the non-matching character
	offset := -count
	if count < limit {
		// NOTE(review): when the loop stopped due to a read error (EOF) no
		// extra byte was consumed, yet the additional unread is still
		// applied — verify the resulting position at end-of-input.
		offset--
	}

	//nolint:errcheck // Unread the already matched characters
	t.input.Seek(offset, io.SeekCurrent)
	return count >= limit
}
// readWriteByte reads one byte from the input, copies it to the output and
// returns the byte read.
func (t *trimmer) readWriteByte() (byte, error) {
	b, err := t.input.ReadByte()
	if err != nil {
		return 0, err
	}
	return b, t.output.WriteByte(b)
}
// escape copies an escape sequence — the backslash plus the byte it
// escapes — verbatim to the output.
func (t *trimmer) escape() error {
	//nolint:errcheck // Consume the known starting backslash and quote
	t.readWriteByte()

	// Copy the escaped character itself and propagate any read error
	_, err := t.readWriteByte()
	return err
}
// singleQuote copies a single-quoted string verbatim, stopping at the
// closing quote, a line break or EOF.
func (t *trimmer) singleQuote() error {
	//nolint:errcheck // Consume the known starting quote
	t.readWriteByte()

	// Copy bytes until EOF, line end or another single quote
	for {
		c, err := t.readWriteByte()
		if err != nil {
			return err
		}
		if c == '\'' || c == '\n' {
			return nil
		}
	}
}
// tripleSingleQuote copies a multi-line literal string ('''...''') verbatim
// until the closing triple quote or EOF.
func (t *trimmer) tripleSingleQuote() error {
	for i := 0; i < 3; i++ {
		//nolint:errcheck // Consume the known starting quotes
		t.readWriteByte()
	}

	// Read bytes until EOF or another set of triple single quotes
	for {
		c, err := t.readWriteByte()
		if err != nil {
			return err
		}

		// A quote followed by two more quotes terminates the string
		if c == '\'' && t.hasNQuotes('\'', 2) {
			//nolint:errcheck // Consume the two additional ending quotes
			t.readWriteByte()
			//nolint:errcheck // Consume the two additional ending quotes
			t.readWriteByte()
			return nil
		}
	}
}
// doubleQuote copies a double-quoted string verbatim, handling escape
// sequences inside the string and stopping at the closing quote, a line
// break or EOF.
func (t *trimmer) doubleQuote() error {
	//nolint:errcheck // Consume the known starting quote
	t.readWriteByte()

	// Read bytes until EOF, line end or another double quote
	for {
		c, err := t.input.ReadByte()
		if err != nil {
			return err
		}
		switch c {
		case '\\':
			// Escaped characters must not terminate the string, e.g. \"
			//nolint:errcheck // Consume the found escaped character
			t.input.UnreadByte()
			if err := t.escape(); err != nil {
				return err
			}
			continue
		case '"', '\n':
			// Found terminator
			return t.output.WriteByte(c)
		}
		t.output.WriteByte(c)
	}
}
// tripleDoubleQuote copies a multi-line basic string ("""...""") verbatim,
// handling escape sequences and terminating at the closing triple quote or
// EOF.
func (t *trimmer) tripleDoubleQuote() error {
	for i := 0; i < 3; i++ {
		//nolint:errcheck // Consume the known starting quotes
		t.readWriteByte()
	}

	// Read bytes until EOF or another set of triple double quotes
	for {
		c, err := t.input.ReadByte()
		if err != nil {
			return err
		}
		switch c {
		case '\\':
			// Escaped characters must not terminate the string, e.g. \"
			//nolint:errcheck // Consume the found escape character
			t.input.UnreadByte()
			if err := t.escape(); err != nil {
				return err
			}
			continue
		case '"':
			t.output.WriteByte(c)
			// A quote followed by two more quotes terminates the string
			if t.hasNQuotes('"', 2) {
				//nolint:errcheck // Consume the two additional ending quotes
				t.readWriteByte()
				//nolint:errcheck // Consume the two additional ending quotes
				t.readWriteByte()
				return nil
			}
			continue
		}
		t.output.WriteByte(c)
	}
}
// comment discards the bytes of a comment up to (but excluding) the next
// line break; the line break itself is kept so line numbers stay stable.
func (t *trimmer) comment() error {
	for {
		c, err := t.input.ReadByte()
		if err != nil {
			return err
		}
		if c != '\n' {
			continue
		}
		return t.output.WriteByte(c)
	}
}
// substituteEnvironment expands environment-variable references (e.g. ${VAR},
// $VAR, ${VAR:-default}) in the given config contents using the compose-go
// template engine. References to undefined variables and invalid template
// patterns (such as regexp back-references like ${1}) are kept verbatim.
// With oldReplacementBehavior set, the pre-v1.27 variable pattern (oldVarRe)
// is used instead of the default one.
func substituteEnvironment(contents []byte, oldReplacementBehavior bool) ([]byte, error) {
	options := []template.Option{
		template.WithReplacementFunction(func(s string, m template.Mapping, cfg *template.Config) (string, error) {
			result, applied, err := template.DefaultReplacementAppliedFunc(s, m, cfg)
			if err == nil && !applied {
				// Keep undeclared environment-variable patterns to reproduce
				// pre-v1.27 behavior
				return s, nil
			}
			if err != nil && strings.HasPrefix(err.Error(), "Invalid template:") {
				// Keep invalid template patterns to ignore regexp substitutions
				// like ${1}
				return s, nil
			}
			return result, err
		}),
		template.WithoutLogging,
	}
	if oldReplacementBehavior {
		options = append(options, template.WithPattern(oldVarRe))
	}

	envMap := utils.GetAsEqualsMap(os.Environ())
	retVal, err := template.SubstituteWithOptions(string(contents), func(k string) (string, bool) {
		if v, ok := envMap[k]; ok {
			return v, ok
		}
		return "", false
	}, options...)
	return []byte(retVal), err
}

411
config/internal_test.go Normal file
View file

@ -0,0 +1,411 @@
package config
import (
"bytes"
"fmt"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"testing"
"time"
"github.com/stretchr/testify/require"
)
// TestEnvironmentSubstitution checks substituteEnvironment with the default
// (new) variable pattern: set/unset variables, defaults, error directives
// and nested fallbacks.
func TestEnvironmentSubstitution(t *testing.T) {
	tests := []struct {
		name         string
		setEnv       func(*testing.T)
		contents     string
		expected     string
		wantErr      bool
		errSubstring string
	}{
		{
			name: "Legacy with ${} and without {}",
			setEnv: func(t *testing.T) {
				t.Setenv("TEST_ENV1", "VALUE1")
				t.Setenv("TEST_ENV2", "VALUE2")
			},
			contents: "A string with ${TEST_ENV1}, $TEST_ENV2 and $TEST_ENV1 as repeated",
			expected: "A string with VALUE1, VALUE2 and VALUE1 as repeated",
		},
		{
			// Unset variables are kept verbatim
			name:     "Env not set",
			contents: "Env variable ${NOT_SET} will be empty",
			expected: "Env variable ${NOT_SET} will be empty",
		},
		{
			name:     "Env not set, fallback to default",
			contents: "Env variable ${THIS_IS_ABSENT:-Fallback}",
			expected: "Env variable Fallback",
		},
		{
			name: "No fallback",
			setEnv: func(t *testing.T) {
				t.Setenv("MY_ENV1", "VALUE1")
			},
			contents: "Env variable ${MY_ENV1:-Fallback}",
			expected: "Env variable VALUE1",
		},
		{
			name: "Mix and match",
			setEnv: func(t *testing.T) {
				t.Setenv("MY_VAR", "VALUE")
				t.Setenv("MY_VAR2", "VALUE2")
			},
			contents: "Env var ${MY_VAR} is set, with $MY_VAR syntax and default on this ${MY_VAR1:-Substituted}, no default on this ${MY_VAR2:-NoDefault}",
			expected: "Env var VALUE is set, with VALUE syntax and default on this Substituted, no default on this VALUE2",
		},
		{
			name: "empty but set",
			setEnv: func(t *testing.T) {
				t.Setenv("EMPTY", "")
			},
			contents: "Contains ${EMPTY} nothing",
			expected: "Contains  nothing",
		},
		{
			name:     "Default has special chars",
			contents: `Not recommended but supported ${MY_VAR:-Default with special chars Supported#$\"}`,
			expected: `Not recommended but supported Default with special chars Supported#$\"`, // values are escaped
		},
		{
			// The ? directive errors out when the variable is unset
			name:         "unset error",
			contents:     "Contains ${THIS_IS_NOT_SET?unset-error}",
			wantErr:      true,
			errSubstring: "unset-error",
		},
		{
			// The :? directive errors out when the variable is empty
			name: "env empty error",
			setEnv: func(t *testing.T) {
				t.Setenv("ENV_EMPTY", "")
			},
			contents:     "Contains ${ENV_EMPTY:?empty-error}",
			wantErr:      true,
			errSubstring: "empty-error",
		},
		{
			name: "Fallback as env variable",
			setEnv: func(t *testing.T) {
				t.Setenv("FALLBACK", "my-fallback")
			},
			contents: "Should output ${NOT_SET:-${FALLBACK}}",
			expected: "Should output my-fallback",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if tt.setEnv != nil {
				tt.setEnv(t)
			}
			actual, err := substituteEnvironment([]byte(tt.contents), false)
			if tt.wantErr {
				require.ErrorContains(t, err, tt.errSubstring)
				return
			}
			require.EqualValues(t, tt.expected, string(actual))
		})
	}
}
// TestEnvironmentSubstitutionOldBehavior checks substituteEnvironment with
// the pre-v1.27 variable pattern (oldReplacementBehavior=true), especially
// the handling of double-dollar and backslash-escaped references.
func TestEnvironmentSubstitutionOldBehavior(t *testing.T) {
	tests := []struct {
		name     string
		contents string
		expected string
	}{
		{
			name:     "not defined no brackets",
			contents: `my-da$tabase`,
			expected: `my-da$tabase`,
		},
		{
			name:     "not defined brackets",
			contents: `my-da${ta}base`,
			expected: `my-da${ta}base`,
		},
		{
			name:     "not defined no brackets double dollar",
			contents: `my-da$$tabase`,
			expected: `my-da$$tabase`,
		},
		{
			name:     "not defined no brackets backslash",
			contents: `my-da\$tabase`,
			expected: `my-da\$tabase`,
		},
		{
			name:     "not defined brackets backslash",
			contents: `my-da\${ta}base`,
			expected: `my-da\${ta}base`,
		},
		{
			name:     "no brackets and suffix",
			contents: `my-da$VARbase`,
			expected: `my-da$VARbase`,
		},
		{
			name:     "no brackets",
			contents: `my-da$VAR`,
			expected: `my-dafoobar`,
		},
		{
			name:     "brackets",
			contents: `my-da${VAR}base`,
			expected: `my-dafoobarbase`,
		},
		{
			// In the old behavior $$ does NOT escape the substitution
			name:     "no brackets double dollar",
			contents: `my-da$$VAR`,
			expected: `my-da$foobar`,
		},
		{
			name:     "brackets double dollar",
			contents: `my-da$${VAR}`,
			expected: `my-da$foobar`,
		},
		{
			name:     "no brackets backslash",
			contents: `my-da\$VAR`,
			expected: `my-da\foobar`,
		},
		{
			name:     "brackets backslash",
			contents: `my-da\${VAR}base`,
			expected: `my-da\foobarbase`,
		},
		{
			name:     "fallback",
			contents: `my-da${ta:-omg}base`,
			expected: `my-daomgbase`,
		},
		{
			name:     "fallback env",
			contents: `my-da${ta:-${FALLBACK}}base`,
			expected: `my-dadefaultbase`,
		},
		{
			name:     "regex substitution",
			contents: `${1}`,
			expected: `${1}`,
		},
		{
			name:     "empty but set",
			contents: "Contains ${EMPTY} nothing",
			expected: "Contains  nothing",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			t.Setenv("VAR", "foobar")
			t.Setenv("FALLBACK", "default")
			t.Setenv("EMPTY", "")
			actual, err := substituteEnvironment([]byte(tt.contents), true)
			require.NoError(t, err)
			require.EqualValues(t, tt.expected, string(actual))
		})
	}
}
// TestEnvironmentSubstitutionNewBehavior checks substituteEnvironment with
// the default pattern (oldReplacementBehavior=false); here a double dollar
// ($$) escapes the reference and collapses to a single dollar.
func TestEnvironmentSubstitutionNewBehavior(t *testing.T) {
	tests := []struct {
		name     string
		contents string
		expected string
	}{
		{
			name:     "not defined no brackets",
			contents: `my-da$tabase`,
			expected: `my-da$tabase`,
		},
		{
			name:     "not defined brackets",
			contents: `my-da${ta}base`,
			expected: `my-da${ta}base`,
		},
		{
			name:     "not defined no brackets double dollar",
			contents: `my-da$$tabase`,
			expected: `my-da$tabase`,
		},
		{
			name:     "not defined no brackets backslash",
			contents: `my-da\$tabase`,
			expected: `my-da\$tabase`,
		},
		{
			name:     "not defined brackets backslash",
			contents: `my-da\${ta}base`,
			expected: `my-da\${ta}base`,
		},
		{
			name:     "no brackets and suffix",
			contents: `my-da$VARbase`,
			expected: `my-da$VARbase`,
		},
		{
			name:     "no brackets",
			contents: `my-da$VAR`,
			expected: `my-dafoobar`,
		},
		{
			name:     "brackets",
			contents: `my-da${VAR}base`,
			expected: `my-dafoobarbase`,
		},
		{
			// In the new behavior $$ escapes the substitution entirely
			name:     "no brackets double dollar",
			contents: `my-da$$VAR`,
			expected: `my-da$VAR`,
		},
		{
			name:     "brackets double dollar",
			contents: `my-da$${VAR}`,
			expected: `my-da${VAR}`,
		},
		{
			name:     "no brackets backslash",
			contents: `my-da\$VAR`,
			expected: `my-da\foobar`,
		},
		{
			name:     "brackets backslash",
			contents: `my-da\${VAR}base`,
			expected: `my-da\foobarbase`,
		},
		{
			name:     "fallback",
			contents: `my-da${ta:-omg}base`,
			expected: `my-daomgbase`,
		},
		{
			name:     "fallback env",
			contents: `my-da${ta:-${FALLBACK}}base`,
			expected: `my-dadefaultbase`,
		},
		{
			name:     "regex substitution",
			contents: `${1}`,
			expected: `${1}`,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			t.Setenv("VAR", "foobar")
			t.Setenv("FALLBACK", "default")
			actual, err := substituteEnvironment([]byte(tt.contents), false)
			require.NoError(t, err)
			require.EqualValues(t, tt.expected, string(actual))
		})
	}
}
// TestParseConfig checks parseConfig end-to-end, in particular that comments
// are removed while '#' characters inside quoted strings survive.
func TestParseConfig(t *testing.T) {
	tests := []struct {
		name     string
		setEnv   func(*testing.T)
		contents string
		expected string
		errmsg   string
	}{
		{
			name: "empty var name",
			contents: `
# Environment variables can be used anywhere in this config file, simply surround
# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"),
# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR})Should output ${NOT_SET:-${FALLBACK}}
`,
			expected: "\n\n\n\n",
		},
		{
			name: "comment in command (issue #13643)",
			contents: `
[[inputs.exec]]
  commands = ["echo \"abc#def\""]
`,
			expected: `
[[inputs.exec]]
  commands = ["echo \"abc#def\""]
`,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if tt.setEnv != nil {
				tt.setEnv(t)
			}
			tbl, err := parseConfig([]byte(tt.contents))
			if tt.errmsg != "" {
				require.ErrorContains(t, err, tt.errmsg)
				return
			}
			require.NoError(t, err)

			if len(tt.expected) > 0 {
				require.EqualValues(t, tt.expected, string(tbl.Data))
			}
		})
	}
}
// TestRemoveComments compares removeComments output against a golden file,
// ignoring trailing whitespace per line.
func TestRemoveComments(t *testing.T) {
	// Read expectation
	expected, err := os.ReadFile(filepath.Join("testdata", "envvar_comments_expected.toml"))
	require.NoError(t, err)

	// Read the file and remove the comments
	buf, err := os.ReadFile(filepath.Join("testdata", "envvar_comments.toml"))
	require.NoError(t, err)
	removed, err := removeComments(buf)
	require.NoError(t, err)
	// Normalize trailing whitespace so the comparison is stable
	lines := bytes.Split(removed, []byte{'\n'})
	for i, line := range lines {
		lines[i] = bytes.TrimRight(line, " \t")
	}
	actual := bytes.Join(lines, []byte{'\n'})

	// Do the comparison
	require.Equal(t, string(expected), string(actual))
}
// TestURLRetries3Fails checks that loading config from a URL is retried
// three times (four requests total) before failing.
func TestURLRetries3Fails(t *testing.T) {
	// Disable the retry backoff to keep the test fast
	httpLoadConfigRetryInterval = 0 * time.Second
	responseCounter := 0
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusNotFound)
		responseCounter++
	}))
	defer ts.Close()

	expected := fmt.Sprintf("loading config file %s failed: failed to fetch HTTP config: 404 Not Found", ts.URL)

	c := NewConfig()
	err := c.LoadConfig(ts.URL)
	require.Error(t, err)
	require.Equal(t, expected, err.Error())
	// Initial request plus three retries
	require.Equal(t, 4, responseCounter)
}
// TestURLRetries3FailsThenPasses checks that loading config from a URL
// succeeds when the final (fourth) attempt returns 200.
func TestURLRetries3FailsThenPasses(t *testing.T) {
	// Disable the retry backoff to keep the test fast
	httpLoadConfigRetryInterval = 0 * time.Second
	responseCounter := 0

	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		// Fail the first three requests, then succeed
		if responseCounter <= 2 {
			w.WriteHeader(http.StatusNotFound)
		} else {
			w.WriteHeader(http.StatusOK)
		}
		responseCounter++
	}))
	defer ts.Close()

	c := NewConfig()
	require.NoError(t, c.LoadConfig(ts.URL))
	require.Equal(t, 4, responseCounter)
}

261
config/migration.go Normal file
View file

@ -0,0 +1,261 @@
package config
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"log"
"sort"
"strings"
"github.com/influxdata/toml"
"github.com/influxdata/toml/ast"
"github.com/influxdata/telegraf/migrations"
_ "github.com/influxdata/telegraf/migrations/all" // register all migrations
)
// section couples a parsed TOML table with its location in the original file
// and the raw text it was created from.
type section struct {
	name    string        // canonical section name, e.g. "inputs.cpu"
	begin   int           // line number where the section starts in the file
	content *ast.Table    // parsed TOML content of the section
	raw     *bytes.Buffer // raw text of the section including comments
}
// splitToSections converts the parsed TOML document into a flat list of
// sections — one per plugin instance for the plugin categories, and one per
// remaining top-level table — sorted by their line number in the file.
func splitToSections(root *ast.Table) ([]section, error) {
	var sections []section
	for name, elements := range root.Fields {
		switch name {
		case "inputs", "outputs", "processors", "aggregators":
			category, ok := elements.(*ast.Table)
			if !ok {
				return nil, fmt.Errorf("%q is not a table (%T)", name, category)
			}

			// Plugin categories contain one list of tables per plugin type,
			// with one entry per configured instance
			for plugin, elements := range category.Fields {
				tbls, ok := elements.([]*ast.Table)
				if !ok {
					return nil, fmt.Errorf("elements of \"%s.%s\" is not a list of tables (%T)", name, plugin, elements)
				}
				for _, tbl := range tbls {
					s := section{
						name:    name + "." + tbl.Name,
						begin:   tbl.Line,
						content: tbl,
						raw:     &bytes.Buffer{},
					}
					sections = append(sections, s)
				}
			}
		default:
			// Non-plugin (global) sections such as "agent"
			tbl, ok := elements.(*ast.Table)
			if !ok {
				return nil, fmt.Errorf("%q is not a table (%T)", name, elements)
			}
			s := section{
				name:    name,
				begin:   tbl.Line,
				content: tbl,
				raw:     &bytes.Buffer{},
			}
			sections = append(sections, s)
		}
	}

	// Sort the TOML elements by begin (line-number)
	sort.SliceStable(sections, func(i, j int) bool { return sections[i].begin < sections[j].begin })

	return sections, nil
}
// assignTextToSections distributes the raw file text across the given
// sections (which must be sorted by begin line). Text before the first
// section is collected into a synthetic "header" section. Comment lines
// directly preceding a section header (without a separating non-comment
// line) are attributed to that following section.
func assignTextToSections(data []byte, sections []section) ([]section, error) {
	// Now assign the raw text to each section
	if sections[0].begin > 0 {
		sections = append([]section{{
			name:  "header",
			begin: 0,
			raw:   &bytes.Buffer{},
		}}, sections...)
	}

	var lineno int
	scanner := bufio.NewScanner(bytes.NewBuffer(data))
	for idx, next := range sections[1:] {
		var buf bytes.Buffer
		for lineno < next.begin-1 {
			if !scanner.Scan() {
				break
			}
			lineno++

			line := strings.TrimSpace(scanner.Text())
			if strings.HasPrefix(line, "#") {
				// Buffer comment lines — they may belong to the NEXT section
				buf.Write(scanner.Bytes())
				buf.WriteString("\n")
				continue
			} else if buf.Len() > 0 {
				// A non-comment line flushes buffered comments into the
				// current section
				if _, err := io.Copy(sections[idx].raw, &buf); err != nil {
					return nil, fmt.Errorf("copying buffer failed: %w", err)
				}
				buf.Reset()
			}
			sections[idx].raw.Write(scanner.Bytes())
			sections[idx].raw.WriteString("\n")
		}
		if err := scanner.Err(); err != nil {
			return nil, fmt.Errorf("splitting by line failed: %w", err)
		}

		// If a comment is directly in front of the next section, without
		// newline, the comment is assigned to the next section.
		if buf.Len() > 0 {
			if _, err := io.Copy(sections[idx+1].raw, &buf); err != nil {
				return nil, fmt.Errorf("copying buffer failed: %w", err)
			}
			buf.Reset()
		}
	}
	// Write the remaining to the last section
	for scanner.Scan() {
		sections[len(sections)-1].raw.Write(scanner.Bytes())
		sections[len(sections)-1].raw.WriteString("\n")
	}
	if err := scanner.Err(); err != nil {
		return nil, fmt.Errorf("splitting by line failed: %w", err)
	}
	return sections, nil
}
// ApplyMigrations parses the given TOML configuration data, applies all
// registered migrations and returns the migrated configuration text together
// with the number of migrations applied. Migrations run in four phases:
// global sections, whole-plugin migrations, plugin-option migrations and
// general per-plugin migrations.
func ApplyMigrations(data []byte) ([]byte, uint64, error) {
	root, err := toml.Parse(data)
	if err != nil {
		return nil, 0, fmt.Errorf("parsing failed: %w", err)
	}

	// Split the configuration into sections containing the location
	// in the file.
	sections, err := splitToSections(root)
	if err != nil {
		return nil, 0, fmt.Errorf("splitting to sections failed: %w", err)
	}
	if len(sections) == 0 {
		return nil, 0, errors.New("no TOML configuration found")
	}

	// Assign the configuration text to the corresponding segments
	sections, err = assignTextToSections(data, sections)
	if err != nil {
		return nil, 0, fmt.Errorf("assigning text failed: %w", err)
	}

	var applied uint64

	// Do the actual global section migration(s). Sections containing a dot
	// are plugin sections and are handled by the phases below.
	for idx, s := range sections {
		if strings.Contains(s.name, ".") {
			continue
		}
		log.Printf("D! applying global migrations to section %q in line %d...", s.name, s.begin)
		for _, migrate := range migrations.GlobalMigrations {
			result, msg, err := migrate(s.name, s.content)
			if err != nil {
				if errors.Is(err, migrations.ErrNotApplicable) {
					continue
				}
				return nil, 0, fmt.Errorf("migrating options of %q (line %d) failed: %w", s.name, s.begin, err)
			}
			if msg != "" {
				log.Printf("I! Global section %q in line %d: %s", s.name, s.begin, msg)
			}
			// NOTE(review): s.content is not reparsed from the migrated text,
			// so a subsequent migration in this loop still sees the
			// pre-migration AST — confirm this chaining behavior is intended.
			s.raw = bytes.NewBuffer(result)
			applied++
		}
		sections[idx] = s
	}

	// Do the actual plugin migration(s)
	for idx, s := range sections {
		migrate, found := migrations.PluginMigrations[s.name]
		if !found {
			continue
		}

		log.Printf("D! migrating plugin %q in line %d...", s.name, s.begin)
		result, msg, err := migrate(s.content)
		if err != nil {
			return nil, 0, fmt.Errorf("migrating %q (line %d) failed: %w", s.name, s.begin, err)
		}
		if msg != "" {
			log.Printf("I! Plugin %q in line %d: %s", s.name, s.begin, msg)
		}
		s.raw = bytes.NewBuffer(result)
		// Reparse the migrated text so the following phases operate on the
		// up-to-date AST of this section.
		tbl, err := toml.Parse(s.raw.Bytes())
		if err != nil {
			return nil, 0, fmt.Errorf("reparsing migrated %q (line %d) failed: %w", s.name, s.begin, err)
		}
		s.content = tbl
		sections[idx] = s
		applied++
	}

	// Do the actual plugin option migration(s)
	for idx, s := range sections {
		migrate, found := migrations.PluginOptionMigrations[s.name]
		if !found {
			continue
		}

		log.Printf("D! migrating options of plugin %q in line %d...", s.name, s.begin)
		result, msg, err := migrate(s.content)
		if err != nil {
			if errors.Is(err, migrations.ErrNotApplicable) {
				continue
			}
			return nil, 0, fmt.Errorf("migrating options of %q (line %d) failed: %w", s.name, s.begin, err)
		}
		if msg != "" {
			log.Printf("I! Plugin %q in line %d: %s", s.name, s.begin, msg)
		}
		s.raw = bytes.NewBuffer(result)
		sections[idx] = s
		applied++
	}

	// Do general migrations applying to all plugins
	for idx, s := range sections {
		// Only "<category>.<name>" sections are plugins.
		parts := strings.Split(s.name, ".")
		if len(parts) != 2 {
			continue
		}
		log.Printf("D! applying general migrations to plugin %q in line %d...", s.name, s.begin)
		category, name := parts[0], parts[1]
		for _, migrate := range migrations.GeneralMigrations {
			result, msg, err := migrate(category, name, s.content)
			if err != nil {
				if errors.Is(err, migrations.ErrNotApplicable) {
					continue
				}
				return nil, 0, fmt.Errorf("migrating options of %q (line %d) failed: %w", s.name, s.begin, err)
			}
			if msg != "" {
				log.Printf("I! Plugin %q in line %d: %s", s.name, s.begin, msg)
			}
			s.raw = bytes.NewBuffer(result)
			applied++
		}
		sections[idx] = s
	}

	// Reconstruct the config file from the sections
	var buf bytes.Buffer
	for _, s := range sections {
		_, err = s.raw.WriteTo(&buf)
		if err != nil {
			return nil, applied, fmt.Errorf("joining output failed: %w", err)
		}
	}

	return buf.Bytes(), applied, nil
}

79
config/plugin_id.go Normal file
View file

@ -0,0 +1,79 @@
package config
import (
"crypto/sha256"
"encoding/hex"
"fmt"
"sort"
"github.com/influxdata/toml/ast"
)
// keyValuePair is one flattened configuration option: the canonical dotted
// key path within the plugin table and the raw TOML source of its value.
type keyValuePair struct {
	Key   string
	Value string
}
// processTable flattens the given TOML table, including nested tables and
// arrays of tables, into a list of key/value pairs. Nested keys are joined
// with dots and array-of-table elements are disambiguated with a "#<index>"
// path component.
func processTable(parent string, table *ast.Table) ([]keyValuePair, error) {
	prefix := ""
	if parent != "" {
		prefix = parent + "."
	}

	var flattened []keyValuePair
	for name, node := range table.Fields {
		switch element := node.(type) {
		case *ast.KeyValue:
			// Plain option; record the raw TOML source of the value.
			flattened = append(flattened, keyValuePair{
				Key:   prefix + name,
				Value: element.Value.Source(),
			})
		case *ast.Table:
			// Sub-table; recurse with the extended key path.
			key := prefix + name
			nested, err := processTable(key, element)
			if err != nil {
				return nil, fmt.Errorf("parsing table for %q failed: %w", key, err)
			}
			flattened = append(flattened, nested...)
		case []*ast.Table:
			// Array of tables; include the element index in the key path.
			for idx, sub := range element {
				key := fmt.Sprintf("%s#%d.%s", prefix, idx, name)
				nested, err := processTable(key, sub)
				if err != nil {
					return nil, fmt.Errorf("parsing table for %q #%d failed: %w", key, idx, err)
				}
				flattened = append(flattened, nested...)
			}
		default:
			return nil, fmt.Errorf("unknown node type %T in key %q", node, prefix+name)
		}
	}
	return flattened, nil
}
// generatePluginID computes a stable ID for the given plugin configuration by
// hashing the flattened, sorted configuration options, prefixed by the plugin
// name to avoid overlap between plugin types.
func generatePluginID(prefix string, table *ast.Table) (string, error) {
	// We need to ensure that identically configured plugins _always_
	// result in the same ID no matter which order the options are specified.
	// This is even more relevant as Golang does _not_ give any guarantee
	// on the ordering of maps.
	// So we flatten out the configuration options (also for nested objects)
	// and then sort the resulting array by the canonical key-name.
	cfg, err := processTable("", table)
	if err != nil {
		return "", fmt.Errorf("processing AST failed: %w", err)
	}
	sort.SliceStable(cfg, func(i, j int) bool { return cfg[i].Key < cfg[j].Key })

	// Hash the config options to get the ID. We also prefix the ID with
	// the plugin name to prevent overlap with other plugin types.
	hash := sha256.New()
	hash.Write(append([]byte(prefix), 0))
	for _, kv := range cfg {
		// NOTE(review): key and value are joined with an unescaped ':' so
		// pathological key/value combinations could in principle collide;
		// changing the encoding would change all existing IDs, so keep as-is.
		hash.Write([]byte(kv.Key + ":" + kv.Value))
		hash.Write([]byte{0})
	}
	return hex.EncodeToString(hash.Sum(nil)), nil
}

106
config/plugin_printer.go Normal file
View file

@ -0,0 +1,106 @@
package config
import (
"bytes"
"fmt"
"sort"
"strings"
"github.com/jedib0t/go-pretty/v6/table"
)
var headers = []string{"Name", "Source(s)"}
type pluginPrinter struct {
name string
source string
}
type pluginNames []pluginPrinter
// getPluginSourcesTable renders a table listing each plugin name together
// with the configuration source(s) it was loaded from. It returns an empty
// string when source printing is disabled or no plugins are given.
func getPluginSourcesTable(pluginNames []pluginPrinter) string {
	if !PrintPluginConfigSource || len(pluginNames) == 0 {
		return ""
	}

	// Group the sources by plugin name; appending to a nil slice allocates,
	// so no explicit map-entry initialization is required.
	rows := make(map[string][]string)
	for _, plugin := range pluginNames {
		rows[plugin.name] = append(rows[plugin.name], plugin.source)
	}

	data := make([][]any, 0, len(rows))
	for name, sources := range rows {
		// Annotate plugins appearing more than once with their count.
		nameCountStr := name
		if len(sources) > 1 {
			nameCountStr = fmt.Sprintf("%s (%dx)", name, len(sources))
		}
		data = append(data, []any{nameCountStr, sources})
	}

	// Sort by descending number of sources and break ties alphabetically so
	// the output is deterministic despite the random map-iteration order.
	sort.Slice(data, func(i, j int) bool {
		ni := len(data[i][1].([]string))
		nj := len(data[j][1].([]string))
		if ni != nj {
			return ni > nj
		}
		return data[i][0].(string) < data[j][0].(string)
	})
	return getTableString(headers, data)
}
// getTableString renders the given header and row data as a text table.
// Columns holding a []string are collapsed into a multi-line cell where
// duplicate entries are deduplicated and annotated with their count.
func getTableString(headers []string, data [][]any) string {
	buff := new(bytes.Buffer)
	t := table.NewWriter()
	t.SetOutputMirror(buff)
	t.AppendHeader(convertToRow(headers))

	// Append rows
	for _, row := range data {
		processedRow := make([]interface{}, len(row))
		for i, col := range row {
			switch v := col.(type) {
			case []string: // Convert slices to multi-line strings
				// Count the occurrences of each entry.
				counts := make(map[string]int, len(v))
				for _, s := range v {
					counts[s]++
				}

				// Sort the entries by descending count and break ties
				// alphabetically so the output is deterministic despite
				// the random map-iteration order.
				sources := make([]string, 0, len(counts))
				for s := range counts {
					sources = append(sources, s)
				}
				sort.Slice(sources, func(a, b int) bool {
					if counts[sources[a]] != counts[sources[b]] {
						return counts[sources[a]] > counts[sources[b]]
					}
					return sources[a] < sources[b]
				})

				// Annotate entries occurring more than once with their count.
				for idx, s := range sources {
					if counts[s] > 1 {
						sources[idx] = fmt.Sprintf("%s (%dx)", s, counts[s])
					}
				}
				processedRow[i] = strings.Join(sources, "\n")
			default:
				processedRow[i] = v
			}
		}
		t.AppendRow(processedRow)
	}

	t.Style().Options.SeparateRows = true
	return t.Render()
}
// convertToRow converts a plain string slice into a table.Row usable by the
// table writer.
func convertToRow(data []string) table.Row {
	row := make(table.Row, 0, len(data))
	for _, val := range data {
		row = append(row, val)
	}
	return row
}

315
config/secret.go Normal file
View file

@ -0,0 +1,315 @@
package config
import (
"fmt"
"log"
"regexp"
"strings"
"sync/atomic"
"github.com/influxdata/telegraf"
)
// unlinkedSecrets contains the list of secrets that contain
// references not yet linked to their corresponding secret-store.
// Those secrets must later (after reading the config) be linked
// by the config to their respective secret-stores.
// Secrets containing constant strings will not be found in this
// list.
var unlinkedSecrets = make([]*Secret, 0)

// secretStorePattern is a regex to validate secret-store IDs
var secretStorePattern = regexp.MustCompile(`^\w+$`)

// secretPattern is a regex to extract references to secrets store in a secret-store
var secretPattern = regexp.MustCompile(`@\{(\w+:\w+)\}`)

// secretCandidatePattern is a regex to find secret candidates to warn users on invalid characters in references
var secretCandidatePattern = regexp.MustCompile(`@\{.+?:.+?}`)

// secretCount is the number of secrets in use in Telegraf
var secretCount atomic.Int64

// selectedImpl is the configured implementation for secrets
var selectedImpl secretImpl = &protectedSecretImpl{}

// secretImpl represents an abstraction for different implementations of secrets
type secretImpl interface {
	// Container wraps the given raw secret bytes into a secret container.
	Container(secret []byte) secretContainer
	// EmptyBuffer returns a buffer representing an empty secret.
	EmptyBuffer() SecretBuffer
	// Wipe overwrites the given memory to remove the secret's trace.
	Wipe(secret []byte)
}

// EnableSecretProtection selects the protected (memory-guarded) secret
// implementation. This is the default.
func EnableSecretProtection() {
	selectedImpl = &protectedSecretImpl{}
}

// DisableSecretProtection selects the unprotected secret implementation,
// keeping secrets in plain process memory.
func DisableSecretProtection() {
	selectedImpl = &unprotectedSecretImpl{}
}
// secretContainer represents an abstraction of the container holding the
// actual secret value
type secretContainer interface {
Destroy()
Equals(ref []byte) (bool, error)
Buffer() (SecretBuffer, error)
AsBuffer(secret []byte) SecretBuffer
Replace(secret []byte)
}
// SecretBuffer allows to access the content of the secret
type SecretBuffer interface {
// Size returns the length of the buffer content
Size() int
// Grow will grow the capacity of the underlying buffer to the given size
Grow(capacity int)
// Bytes returns the content of the buffer as bytes.
// NOTE: The returned bytes shall NOT be accessed after destroying the
// buffer using 'Destroy()' as the underlying the memory area might be
// wiped and invalid.
Bytes() []byte
// TemporaryString returns the content of the buffer as a string.
// NOTE: The returned String shall NOT be accessed after destroying the
// buffer using 'Destroy()' as the underlying the memory area might be
// wiped and invalid.
TemporaryString() string
// String returns a copy of the underlying buffer's content as string.
// It is safe to use the returned value after destroying the buffer.
String() string
// Destroy will wipe the buffer's content and destroy the underlying
// buffer. Do not access the buffer after destroying it.
Destroy()
}
// Secret safely stores sensitive data such as a password or token
type Secret struct {
// container is the implementation for holding the secret. It can be
// protected or not depending on the concrete implementation.
container secretContainer
// resolvers are the functions for resolving a given secret-id (key)
resolvers map[string]telegraf.ResolveFunc
// unlinked contains all references in the secret that are not yet
// linked to the corresponding secret store.
unlinked []string
// notempty denotes if the secret is completely empty
notempty bool
}
// NewSecret creates a new secret from the given bytes
func NewSecret(b []byte) Secret {
	var s Secret
	s.init(b)
	return s
}
// UnmarshalText creates a secret from a toml value following the "string" rule.
func (s *Secret) UnmarshalText(b []byte) error {
	// Unmarshal secret from TOML and put it into protected memory
	s.init(b)

	// Keep track of secrets that contain references to secret-stores
	// for later resolving by the config.
	if len(s.unlinked) > 0 && s.notempty {
		unlinkedSecrets = append(unlinkedSecrets, s)
	}

	return nil
}
// init initializes the secret content from the given raw bytes and extracts
// all valid secret-store references for later linking.
func (s *Secret) init(secret []byte) {
	// Keep track of the number of secrets...
	secretCount.Add(1)

	// Remember if the secret is completely empty
	s.notempty = len(secret) != 0

	// Find all secret candidates and check if they are really a valid
	// reference. Otherwise issue a warning to let the user know that there is
	// a potential issue with their secret instead of silently ignoring it.
	candidates := secretCandidatePattern.FindAllString(string(secret), -1)
	s.unlinked = make([]string, 0, len(candidates))
	for _, c := range candidates {
		if secretPattern.MatchString(c) {
			s.unlinked = append(s.unlinked, c)
		} else {
			log.Printf("W! Secret %q contains invalid character(s), only letters, digits and underscores are allowed.", c)
		}
	}
	// No resolvers yet; they are attached later via Link().
	s.resolvers = nil

	// Setup the container implementation
	s.container = selectedImpl.Container(secret)
}
// Destroy wipes the secret content and releases the underlying container.
// Destroying an already-destroyed secret is a no-op.
func (s *Secret) Destroy() {
	s.resolvers = nil
	s.unlinked = nil
	s.notempty = false

	if s.container != nil {
		s.container.Destroy()
		s.container = nil

		// Keep track of the number of used secrets...
		secretCount.Add(-1)
	}
}

// Empty returns true if the secret is completely empty
func (s *Secret) Empty() bool {
	return !s.notempty
}
// EqualTo performs a constant-time comparison of the secret to the given reference
func (s *Secret) EqualTo(ref []byte) (bool, error) {
	if s.container == nil {
		// An uninitialized or destroyed secret never matches.
		return false, nil
	}

	if len(s.unlinked) != 0 {
		// Comparing requires all references to be resolved first.
		return false, fmt.Errorf("unlinked parts in secret: %v", strings.Join(s.unlinked, ";"))
	}

	return s.container.Equals(ref)
}
// Get returns the resolved content of the secret as a buffer. The caller is
// responsible for destroying the returned buffer after use.
func (s *Secret) Get() (SecretBuffer, error) {
	if s.container == nil {
		return selectedImpl.EmptyBuffer(), nil
	}

	if len(s.unlinked) > 0 {
		return nil, fmt.Errorf("unlinked parts in secret: %v", strings.Join(s.unlinked, ";"))
	}

	// Decrypt the secret so we can return it
	buffer, err := s.container.Buffer()
	if err != nil {
		return nil, err
	}

	// We've got a static secret so simply return the buffer
	if len(s.resolvers) == 0 {
		return buffer, nil
	}
	defer buffer.Destroy()

	// Replace all remaining (dynamic) references by their current values.
	replaceErrs := make([]string, 0)
	newsecret := secretPattern.ReplaceAllFunc(buffer.Bytes(), func(match []byte) []byte {
		resolver, found := s.resolvers[string(match)]
		if !found {
			replaceErrs = append(replaceErrs, fmt.Sprintf("no resolver for %q", match))
			return match
		}
		replacement, _, err := resolver()
		if err != nil {
			replaceErrs = append(replaceErrs, fmt.Sprintf("resolving %q failed: %v", match, err))
			return match
		}

		return replacement
	})
	if len(replaceErrs) > 0 {
		// Wipe the partially resolved result to avoid leaking secret data.
		selectedImpl.Wipe(newsecret)
		return nil, fmt.Errorf("replacing secrets failed: %s", strings.Join(replaceErrs, ";"))
	}

	return s.container.AsBuffer(newsecret), nil
}
// Set overwrites the secret's value with a new one. Please note, the secret
// is not linked again, so only references to secret-stores can be used, e.g. by
// adding more clear-text or reordering secrets.
func (s *Secret) Set(value []byte) error {
	// Resolve references in the new value using the already-linked resolvers.
	secret, res, replaceErrs := resolve(value, s.resolvers)
	if len(replaceErrs) > 0 {
		return fmt.Errorf("linking new secrets failed: %s", strings.Join(replaceErrs, ";"))
	}

	// Set the new secret
	// NOTE(review): assumes the secret was initialized before; calling Set on
	// a zero-value Secret would hit a nil container — confirm callers
	// guarantee initialization.
	s.container.Replace(secret)
	s.resolvers = res
	s.notempty = len(value) > 0

	return nil
}
// GetUnlinked returns the parts of the secret that are not yet linked to a resolver
func (s *Secret) GetUnlinked() []string {
	return s.unlinked
}
// Link uses the given resolver map to link the secret parts to their
// secret-store resolvers.
func (s *Secret) Link(resolvers map[string]telegraf.ResolveFunc) error {
	// Decrypt the secret so we can work on its clear-text representation
	if s.container == nil {
		return nil
	}
	buffer, err := s.container.Buffer()
	if err != nil {
		return err
	}
	defer buffer.Destroy()

	// Iterate through the parts and try to resolve them. For static parts
	// we directly replace them, while for dynamic ones we store the resolver.
	newsecret, res, replaceErrs := resolve(buffer.Bytes(), resolvers)
	if len(replaceErrs) > 0 {
		return fmt.Errorf("linking secrets failed: %s", strings.Join(replaceErrs, ";"))
	}
	s.resolvers = res

	// Store the secret if it has changed
	if buffer.TemporaryString() != string(newsecret) {
		s.container.Replace(newsecret)
	}

	// All linked now
	s.unlinked = nil

	return nil
}
// resolve replaces all secret-store references in the given secret that can
// be statically resolved and collects the resolvers of dynamic references.
// It returns the (partially) resolved secret, the map of remaining dynamic
// resolvers and a list of errors encountered during replacement.
func resolve(secret []byte, resolvers map[string]telegraf.ResolveFunc) ([]byte, map[string]telegraf.ResolveFunc, []string) {
	// Iterate through the parts and try to resolve them. For static parts
	// we directly replace them, while for dynamic ones we store the resolver.
	replaceErrs := make([]string, 0)
	remaining := make(map[string]telegraf.ResolveFunc)
	newsecret := secretPattern.ReplaceAllFunc(secret, func(match []byte) []byte {
		resolver, found := resolvers[string(match)]
		if !found {
			replaceErrs = append(replaceErrs, fmt.Sprintf("unlinked part %q", match))
			return match
		}
		replacement, dynamic, err := resolver()
		if err != nil {
			replaceErrs = append(replaceErrs, fmt.Sprintf("resolving %q failed: %v", match, err))
			return match
		}

		// Replace static parts right away
		if !dynamic {
			return replacement
		}

		// Keep the resolver for dynamic secrets
		remaining[string(match)] = resolver
		return match
	})
	return newsecret, remaining, replaceErrs
}
// splitLink splits a secret reference of the form "@{storeid:key}" into its
// store-ID and key components.
func splitLink(s string) (storeID, key string) {
	// Strip the surrounding "@{...}" before splitting; the regular-expression
	// match guarantees that a colon separator is present.
	inner := s[2 : len(s)-1]
	parts := strings.SplitN(inner, ":", 2)
	return parts[0], parts[1]
}

131
config/secret_protected.go Normal file
View file

@ -0,0 +1,131 @@
package config
import (
"fmt"
"github.com/awnumar/memguard"
)
// protectedSecretImpl implements the secret abstraction backed by memguard,
// keeping secrets in guarded enclaves and locked buffers.
type protectedSecretImpl struct{}

// Container wraps the given secret bytes into a memguard enclave.
func (*protectedSecretImpl) Container(secret []byte) secretContainer {
	return &protectedSecretContainer{
		enclave: memguard.NewEnclave(secret),
	}
}

// EmptyBuffer returns a buffer representing an empty secret.
func (*protectedSecretImpl) EmptyBuffer() SecretBuffer {
	return &lockedBuffer{}
}

// Wipe overwrites the given memory to remove the secret's trace.
func (*protectedSecretImpl) Wipe(secret []byte) {
	memguard.WipeBytes(secret)
}
// lockedBuffer implements SecretBuffer on top of a memguard locked buffer.
// A nil buf represents an empty secret.
type lockedBuffer struct {
	buf *memguard.LockedBuffer
}

// Size returns the length of the buffer content
func (lb *lockedBuffer) Size() int {
	if lb.buf == nil {
		return 0
	}
	return lb.buf.Size()
}
// Grow will grow the capacity of the underlying buffer to the given size.
// Growing an empty buffer simply allocates a new locked buffer.
func (lb *lockedBuffer) Grow(capacity int) {
	size := lb.Size()
	if capacity <= size {
		return
	}
	buf := memguard.NewBuffer(capacity)
	if lb.buf != nil {
		// Preserve the existing content and wipe the old buffer. The Destroy
		// call must stay inside this nil-check: the original code called it
		// unconditionally, panicking when growing an empty (nil) buffer.
		buf.Copy(lb.buf.Bytes())
		lb.buf.Destroy()
	}
	lb.buf = buf
}
// Bytes returns the content of the buffer as bytes.
// NOTE: The returned bytes shall NOT be accessed after destroying the
// buffer using 'Destroy()' as the underlying memory might be wiped.
func (lb *lockedBuffer) Bytes() []byte {
	if lb.buf == nil {
		return nil
	}
	return lb.buf.Bytes()
}

// TemporaryString returns the content of the buffer as a string without
// copying. It shares the buffer's memory and must not be used after Destroy().
func (lb *lockedBuffer) TemporaryString() string {
	if lb.buf == nil {
		return ""
	}
	return lb.buf.String()
}

// String returns a copy of the underlying buffer's content as string.
// It is safe to use the returned value after destroying the buffer.
func (lb *lockedBuffer) String() string {
	if lb.buf == nil {
		return ""
	}
	return string(lb.buf.Bytes())
}

// Destroy wipes the buffer's content and destroys the underlying buffer.
// Destroying an already-empty buffer is a no-op.
func (lb *lockedBuffer) Destroy() {
	if lb.buf == nil {
		return
	}
	lb.buf.Destroy()
	lb.buf = nil
}
// protectedSecretContainer holds a secret in a memguard enclave so the
// clear-text is only available while explicitly opened.
type protectedSecretContainer struct {
	enclave *memguard.Enclave
}

// Destroy wipes the secret from memory and releases the enclave.
func (c *protectedSecretContainer) Destroy() {
	if c.enclave == nil {
		return
	}

	// Wipe the secret from memory
	lockbuf, err := c.enclave.Open()
	if err == nil {
		lockbuf.Destroy()
	}
	c.enclave = nil
}
// Equals performs a constant-time comparison of the contained secret to the
// given reference. An empty (nil-enclave) container never matches.
func (c *protectedSecretContainer) Equals(ref []byte) (bool, error) {
	if c.enclave == nil {
		return false, nil
	}

	// Get a locked-buffer of the secret to perform the comparison
	lockbuf, err := c.enclave.Open()
	if err != nil {
		return false, fmt.Errorf("opening enclave failed: %w", err)
	}
	defer lockbuf.Destroy()

	return lockbuf.EqualTo(ref), nil
}
// Buffer returns the contained secret as a locked buffer. The caller is
// responsible for destroying the returned buffer after use.
func (c *protectedSecretContainer) Buffer() (SecretBuffer, error) {
	if c.enclave == nil {
		return &lockedBuffer{}, nil
	}

	// Get a locked-buffer holding the decrypted secret
	lockbuf, err := c.enclave.Open()
	if err != nil {
		return nil, fmt.Errorf("opening enclave failed: %w", err)
	}

	return &lockedBuffer{lockbuf}, nil
}
// AsBuffer wraps the given raw secret bytes into a locked buffer.
func (*protectedSecretContainer) AsBuffer(secret []byte) SecretBuffer {
	return &lockedBuffer{memguard.NewBufferFromBytes(secret)}
}

// Replace stores the given secret in a new enclave, replacing the old one.
func (c *protectedSecretContainer) Replace(secret []byte) {
	c.enclave = memguard.NewEnclave(secret)
}

845
config/secret_test.go Normal file
View file

@ -0,0 +1,845 @@
package config
import (
"bytes"
"errors"
"fmt"
"log"
"testing"
"github.com/awnumar/memguard"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/secretstores"
)
func TestSecretConstantManually(t *testing.T) {
mysecret := "a wonderful test"
s := NewSecret([]byte(mysecret))
defer s.Destroy()
retrieved, err := s.Get()
require.NoError(t, err)
defer retrieved.Destroy()
require.EqualValues(t, mysecret, retrieved.TemporaryString())
}
func TestLinking(t *testing.T) {
mysecret := "a @{referenced:secret}"
resolvers := map[string]telegraf.ResolveFunc{
"@{referenced:secret}": func() ([]byte, bool, error) {
return []byte("resolved secret"), false, nil
},
}
s := NewSecret([]byte(mysecret))
defer s.Destroy()
require.NoError(t, s.Link(resolvers))
retrieved, err := s.Get()
require.NoError(t, err)
defer retrieved.Destroy()
require.EqualValues(t, "a resolved secret", retrieved.TemporaryString())
}
func TestLinkingResolverError(t *testing.T) {
mysecret := "a @{referenced:secret}"
resolvers := map[string]telegraf.ResolveFunc{
"@{referenced:secret}": func() ([]byte, bool, error) {
return nil, false, errors.New("broken")
},
}
s := NewSecret([]byte(mysecret))
defer s.Destroy()
expected := `linking secrets failed: resolving "@{referenced:secret}" failed: broken`
require.EqualError(t, s.Link(resolvers), expected)
}
func TestGettingUnlinked(t *testing.T) {
mysecret := "a @{referenced:secret}"
s := NewSecret([]byte(mysecret))
defer s.Destroy()
_, err := s.Get()
require.ErrorContains(t, err, "unlinked parts in secret")
}
func TestGettingMissingResolver(t *testing.T) {
mysecret := "a @{referenced:secret}"
s := NewSecret([]byte(mysecret))
defer s.Destroy()
s.unlinked = make([]string, 0)
s.resolvers = map[string]telegraf.ResolveFunc{
"@{a:dummy}": func() ([]byte, bool, error) {
return nil, false, nil
},
}
_, err := s.Get()
expected := `replacing secrets failed: no resolver for "@{referenced:secret}"`
require.EqualError(t, err, expected)
}
func TestGettingResolverError(t *testing.T) {
mysecret := "a @{referenced:secret}"
s := NewSecret([]byte(mysecret))
defer s.Destroy()
s.unlinked = make([]string, 0)
s.resolvers = map[string]telegraf.ResolveFunc{
"@{referenced:secret}": func() ([]byte, bool, error) {
return nil, false, errors.New("broken")
},
}
_, err := s.Get()
expected := `replacing secrets failed: resolving "@{referenced:secret}" failed: broken`
require.EqualError(t, err, expected)
}
func TestUninitializedEnclave(t *testing.T) {
s := Secret{}
defer s.Destroy()
require.NoError(t, s.Link(map[string]telegraf.ResolveFunc{}))
retrieved, err := s.Get()
require.NoError(t, err)
defer retrieved.Destroy()
require.Empty(t, retrieved.Bytes())
}
func TestEnclaveOpenError(t *testing.T) {
mysecret := "a @{referenced:secret}"
s := NewSecret([]byte(mysecret))
defer s.Destroy()
memguard.Purge()
err := s.Link(map[string]telegraf.ResolveFunc{})
require.ErrorContains(t, err, "opening enclave failed")
s.unlinked = make([]string, 0)
_, err = s.Get()
require.ErrorContains(t, err, "opening enclave failed")
}
func TestMissingResolver(t *testing.T) {
mysecret := "a @{referenced:secret}"
s := NewSecret([]byte(mysecret))
defer s.Destroy()
err := s.Link(map[string]telegraf.ResolveFunc{})
require.ErrorContains(t, err, "linking secrets failed: unlinked part")
}
func TestSecretConstant(t *testing.T) {
tests := []struct {
name string
cfg []byte
expected string
}{
{
name: "simple string",
cfg: []byte(`
[[inputs.mockup]]
secret = "a secret"
`),
expected: "a secret",
},
{
name: "mail address",
cfg: []byte(`
[[inputs.mockup]]
secret = "someone@mock.org"
`),
expected: "someone@mock.org",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := NewConfig()
require.NoError(t, c.LoadConfigData(tt.cfg, EmptySourcePath))
require.Len(t, c.Inputs, 1)
// Create a mockup secretstore
store := &MockupSecretStore{
Secrets: map[string][]byte{"mock": []byte("fail")},
}
require.NoError(t, store.Init())
c.SecretStores["mock"] = store
require.NoError(t, c.LinkSecrets())
plugin := c.Inputs[0].Input.(*MockupSecretPlugin)
secret, err := plugin.Secret.Get()
require.NoError(t, err)
defer secret.Destroy()
require.EqualValues(t, tt.expected, secret.TemporaryString())
})
}
}
func TestSecretUnquote(t *testing.T) {
tests := []struct {
name string
cfg []byte
}{
{
name: "single quotes",
cfg: []byte(`
[[inputs.mockup]]
secret = 'a secret'
expected = 'a secret'
`),
},
{
name: "double quotes",
cfg: []byte(`
[[inputs.mockup]]
secret = "a secret"
expected = "a secret"
`),
},
{
name: "triple single quotes",
cfg: []byte(`
[[inputs.mockup]]
secret = '''a secret'''
expected = '''a secret'''
`),
},
{
name: "triple double quotes",
cfg: []byte(`
[[inputs.mockup]]
secret = """a secret"""
expected = """a secret"""
`),
},
{
name: "escaped double quotes",
cfg: []byte(`
[[inputs.mockup]]
secret = "\"a secret\""
expected = "\"a secret\""
`),
},
{
name: "mix double-single quotes (single)",
cfg: []byte(`
[[inputs.mockup]]
secret = "'a secret'"
expected = "'a secret'"
`),
},
{
name: "mix single-double quotes (single)",
cfg: []byte(`
[[inputs.mockup]]
secret = '"a secret"'
expected = '"a secret"'
`),
},
{
name: "mix double-single quotes (triple-single)",
cfg: []byte(`
[[inputs.mockup]]
secret = """'a secret'"""
expected = """'a secret'"""
`),
},
{
name: "mix single-double quotes (triple-single)",
cfg: []byte(`
[[inputs.mockup]]
secret = '''"a secret"'''
expected = '''"a secret"'''
`),
},
{
name: "mix double-single quotes (triple)",
cfg: []byte(`
[[inputs.mockup]]
secret = """'''a secret'''"""
expected = """'''a secret'''"""
`),
},
{
name: "mix single-double quotes (triple)",
cfg: []byte(`
[[inputs.mockup]]
secret = '''"""a secret"""'''
expected = '''"""a secret"""'''
`),
},
{
name: "single quotes with backslashes",
cfg: []byte(`
[[inputs.mockup]]
secret = 'Server=SQLTELEGRAF\\SQL2022;app name=telegraf;log=1;'
expected = 'Server=SQLTELEGRAF\\SQL2022;app name=telegraf;log=1;'
`),
},
{
name: "double quotes with backslashes",
cfg: []byte(`
[[inputs.mockup]]
secret = "Server=SQLTELEGRAF\\SQL2022;app name=telegraf;log=1;"
expected = "Server=SQLTELEGRAF\\SQL2022;app name=telegraf;log=1;"
`),
},
{
name: "triple single quotes with backslashes",
cfg: []byte(`
[[inputs.mockup]]
secret = '''Server=SQLTELEGRAF\\SQL2022;app name=telegraf;log=1;'''
expected = '''Server=SQLTELEGRAF\\SQL2022;app name=telegraf;log=1;'''
`),
},
{
name: "triple double quotes with backslashes",
cfg: []byte(`
[[inputs.mockup]]
secret = """Server=SQLTELEGRAF\\SQL2022;app name=telegraf;log=1;"""
expected = """Server=SQLTELEGRAF\\SQL2022;app name=telegraf;log=1;"""
`),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := NewConfig()
require.NoError(t, c.LoadConfigData(tt.cfg, EmptySourcePath))
require.Len(t, c.Inputs, 1)
// Create a mockup secretstore
store := &MockupSecretStore{
Secrets: map[string][]byte{},
}
require.NoError(t, store.Init())
c.SecretStores["mock"] = store
require.NoError(t, c.LinkSecrets())
plugin := c.Inputs[0].Input.(*MockupSecretPlugin)
secret, err := plugin.Secret.Get()
require.NoError(t, err)
defer secret.Destroy()
require.EqualValues(t, plugin.Expected, secret.TemporaryString())
})
}
}
func TestSecretEnvironmentVariable(t *testing.T) {
cfg := []byte(`
[[inputs.mockup]]
secret = "$SOME_ENV_SECRET"
`)
t.Setenv("SOME_ENV_SECRET", "an env secret")
c := NewConfig()
err := c.LoadConfigData(cfg, EmptySourcePath)
require.NoError(t, err)
require.Len(t, c.Inputs, 1)
// Create a mockup secretstore
store := &MockupSecretStore{
Secrets: map[string][]byte{},
}
require.NoError(t, store.Init())
c.SecretStores["mock"] = store
require.NoError(t, c.LinkSecrets())
plugin := c.Inputs[0].Input.(*MockupSecretPlugin)
secret, err := plugin.Secret.Get()
require.NoError(t, err)
defer secret.Destroy()
require.EqualValues(t, "an env secret", secret.TemporaryString())
}
func TestSecretCount(t *testing.T) {
secretCount.Store(0)
cfg := []byte(`
[[inputs.mockup]]
[[inputs.mockup]]
secret = "a secret"
[[inputs.mockup]]
secret = "another secret"
`)
c := NewConfig()
require.NoError(t, c.LoadConfigData(cfg, EmptySourcePath))
require.Len(t, c.Inputs, 3)
require.Equal(t, int64(2), secretCount.Load())
// Remove all secrets and check
for _, ri := range c.Inputs {
input := ri.Input.(*MockupSecretPlugin)
input.Secret.Destroy()
}
require.Equal(t, int64(0), secretCount.Load())
}
func TestSecretStoreStatic(t *testing.T) {
cfg := []byte(
`
[[inputs.mockup]]
secret = "@{mock:secret1}"
[[inputs.mockup]]
secret = "@{mock:secret2}"
[[inputs.mockup]]
secret = "@{mock:a_strange_secret}"
[[inputs.mockup]]
secret = "@{mock:a_weird_secret}"
`)
c := NewConfig()
err := c.LoadConfigData(cfg, EmptySourcePath)
require.NoError(t, err)
require.Len(t, c.Inputs, 4)
// Create a mockup secretstore
store := &MockupSecretStore{
Secrets: map[string][]byte{
"secret1": []byte("Ood Bnar"),
"secret2": []byte("Thon"),
"a_strange_secret": []byte("Obi-Wan Kenobi"),
"a_weird_secret": []byte("Arca Jeth"),
},
}
require.NoError(t, store.Init())
c.SecretStores["mock"] = store
require.NoError(t, c.LinkSecrets())
expected := []string{"Ood Bnar", "Thon", "Obi-Wan Kenobi", "Arca Jeth"}
for i, input := range c.Inputs {
plugin := input.Input.(*MockupSecretPlugin)
secret, err := plugin.Secret.Get()
require.NoError(t, err)
require.EqualValues(t, expected[i], secret.TemporaryString())
secret.Destroy()
}
}
func TestSecretStoreInvalidKeys(t *testing.T) {
cfg := []byte(
`
[[inputs.mockup]]
secret = "@{mock:}"
[[inputs.mockup]]
secret = "@{mock:wild?%go}"
[[inputs.mockup]]
secret = "@{mock:a-strange-secret}"
[[inputs.mockup]]
secret = "@{mock:a weird secret}"
`)
c := NewConfig()
err := c.LoadConfigData(cfg, EmptySourcePath)
require.NoError(t, err)
require.Len(t, c.Inputs, 4)
// Create a mockup secretstore
store := &MockupSecretStore{
Secrets: map[string][]byte{
"": []byte("Ood Bnar"),
"wild?%go": []byte("Thon"),
"a-strange-secret": []byte("Obi-Wan Kenobi"),
"a weird secret": []byte("Arca Jeth"),
},
}
require.NoError(t, store.Init())
c.SecretStores["mock"] = store
require.NoError(t, c.LinkSecrets())
expected := []string{
"@{mock:}",
"@{mock:wild?%go}",
"@{mock:a-strange-secret}",
"@{mock:a weird secret}",
}
for i, input := range c.Inputs {
plugin := input.Input.(*MockupSecretPlugin)
secret, err := plugin.Secret.Get()
require.NoError(t, err)
require.EqualValues(t, expected[i], secret.TemporaryString())
secret.Destroy()
}
}
func TestSecretStoreDeclarationMissingID(t *testing.T) {
defer func() { unlinkedSecrets = make([]*Secret, 0) }()
cfg := []byte(`[[secretstores.mockup]]`)
c := NewConfig()
err := c.LoadConfigData(cfg, EmptySourcePath)
require.ErrorContains(t, err, `error parsing mockup, "mockup" secret-store without ID`)
}
func TestSecretStoreDeclarationInvalidID(t *testing.T) {
defer func() { unlinkedSecrets = make([]*Secret, 0) }()
invalidIDs := []string{"foo.bar", "dummy-123", "test!", "wohoo+"}
tmpl := `
[[secretstores.mockup]]
id = %q
`
for _, id := range invalidIDs {
t.Run(id, func(t *testing.T) {
cfg := []byte(fmt.Sprintf(tmpl, id))
c := NewConfig()
err := c.LoadConfigData(cfg, EmptySourcePath)
require.ErrorContains(t, err, `error parsing mockup, invalid secret-store ID`)
})
}
}
func TestSecretStoreDeclarationValidID(t *testing.T) {
defer func() { unlinkedSecrets = make([]*Secret, 0) }()
validIDs := []string{"foobar", "dummy123", "test_id", "W0Hoo_lala123"}
tmpl := `
[[secretstores.mockup]]
id = %q
`
for _, id := range validIDs {
t.Run(id, func(t *testing.T) {
cfg := []byte(fmt.Sprintf(tmpl, id))
c := NewConfig()
err := c.LoadConfigData(cfg, EmptySourcePath)
require.NoError(t, err)
})
}
}
type SecretImplTestSuite struct {
suite.Suite
protected bool
}
func (tsuite *SecretImplTestSuite) SetupSuite() {
if tsuite.protected {
EnableSecretProtection()
} else {
DisableSecretProtection()
}
}
func (*SecretImplTestSuite) TearDownSuite() {
EnableSecretProtection()
}
func (*SecretImplTestSuite) TearDownTest() {
unlinkedSecrets = make([]*Secret, 0)
}
func (tsuite *SecretImplTestSuite) TestSecretEqualTo() {
t := tsuite.T()
mysecret := "a wonderful test"
s := NewSecret([]byte(mysecret))
defer s.Destroy()
equal, err := s.EqualTo([]byte(mysecret))
require.NoError(t, err)
require.True(t, equal)
equal, err = s.EqualTo([]byte("some random text"))
require.NoError(t, err)
require.False(t, equal)
}
func (tsuite *SecretImplTestSuite) TestSecretStoreInvalidReference() {
t := tsuite.T()
cfg := []byte(
`
[[inputs.mockup]]
secret = "@{mock:test}"
`)
c := NewConfig()
require.NoError(t, c.LoadConfigData(cfg, EmptySourcePath))
require.Len(t, c.Inputs, 1)
// Create a mockup secretstore
store := &MockupSecretStore{
Secrets: map[string][]byte{"test": []byte("Arca Jeth")},
}
require.NoError(t, store.Init())
c.SecretStores["foo"] = store
err := c.LinkSecrets()
require.EqualError(t, err, `unknown secret-store for "@{mock:test}"`)
for _, input := range c.Inputs {
plugin := input.Input.(*MockupSecretPlugin)
secret, err := plugin.Secret.Get()
require.EqualError(t, err, `unlinked parts in secret: @{mock:test}`)
require.Empty(t, secret)
}
}
// TestSecretStoreStaticChanging checks that a secret backed by a non-dynamic
// (static) store is resolved once and does NOT follow later changes of the
// underlying store value.
func (tsuite *SecretImplTestSuite) TestSecretStoreStaticChanging() {
	t := tsuite.T()

	cfg := []byte(
		`
[[inputs.mockup]]
secret = "@{mock:secret}"
`)
	c := NewConfig()
	err := c.LoadConfigData(cfg, EmptySourcePath)
	require.NoError(t, err)
	require.Len(t, c.Inputs, 1)

	// Create a mockup secretstore
	store := &MockupSecretStore{
		Secrets: map[string][]byte{"secret": []byte("Ood Bnar")},
		Dynamic: false,
	}
	require.NoError(t, store.Init())
	c.SecretStores["mock"] = store
	require.NoError(t, c.LinkSecrets())

	sequence := []string{"Ood Bnar", "Thon", "Obi-Wan Kenobi", "Arca Jeth"}
	plugin := c.Inputs[0].Input.(*MockupSecretPlugin)
	secret, err := plugin.Secret.Get()
	require.NoError(t, err)
	defer secret.Destroy()

	require.EqualValues(t, "Ood Bnar", secret.TemporaryString())

	// Mutate the store value; the resolved secret must stay at the initial
	// value for every iteration.
	for _, v := range sequence {
		store.Secrets["secret"] = []byte(v)
		secret, err := plugin.Secret.Get()
		require.NoError(t, err)

		// The secret should not change as the store is marked non-dynamic!
		require.EqualValues(t, "Ood Bnar", secret.TemporaryString())
		secret.Destroy()
	}
}
// TestSecretStoreDynamic checks that a secret backed by a dynamic store is
// re-resolved on every Get and therefore follows changes of the store value.
func (tsuite *SecretImplTestSuite) TestSecretStoreDynamic() {
	t := tsuite.T()

	cfg := []byte(
		`
[[inputs.mockup]]
secret = "@{mock:secret}"
`)
	c := NewConfig()
	err := c.LoadConfigData(cfg, EmptySourcePath)
	require.NoError(t, err)
	require.Len(t, c.Inputs, 1)

	// Create a mockup secretstore
	store := &MockupSecretStore{
		Secrets: map[string][]byte{"secret": []byte("Ood Bnar")},
		Dynamic: true,
	}
	require.NoError(t, store.Init())
	c.SecretStores["mock"] = store
	require.NoError(t, c.LinkSecrets())

	sequence := []string{"Ood Bnar", "Thon", "Obi-Wan Kenobi", "Arca Jeth"}
	plugin := c.Inputs[0].Input.(*MockupSecretPlugin)
	for _, v := range sequence {
		store.Secrets["secret"] = []byte(v)
		secret, err := plugin.Secret.Get()
		require.NoError(t, err)

		// The secret should follow the store value as the store is dynamic!
		require.EqualValues(t, v, secret.TemporaryString())
		secret.Destroy()
	}
}
// TestSecretSet verifies that a plugin secret can be overwritten at runtime
// via Set and that subsequent Get calls return the new value.
func (tsuite *SecretImplTestSuite) TestSecretSet() {
	t := tsuite.T()
	cfg := []byte(`
[[inputs.mockup]]
secret = "a secret"
`)
	c := NewConfig()
	require.NoError(t, c.LoadConfigData(cfg, EmptySourcePath))
	require.Len(t, c.Inputs, 1)
	require.NoError(t, c.LinkSecrets())

	plugin := c.Inputs[0].Input.(*MockupSecretPlugin)

	secret, err := plugin.Secret.Get()
	require.NoError(t, err)
	defer secret.Destroy()
	require.EqualValues(t, "a secret", secret.TemporaryString())

	// Overwrite the secret and check that the new value is returned.
	require.NoError(t, plugin.Secret.Set([]byte("another secret")))
	newsecret, err := plugin.Secret.Get()
	require.NoError(t, err)
	defer newsecret.Destroy()
	require.EqualValues(t, "another secret", newsecret.TemporaryString())
}
// TestSecretSetResolve verifies that store references inside a value passed
// to Set are resolved when the secret is retrieved afterwards.
func (tsuite *SecretImplTestSuite) TestSecretSetResolve() {
	t := tsuite.T()
	cfg := []byte(`
[[inputs.mockup]]
secret = "@{mock:secret}"
`)
	c := NewConfig()
	require.NoError(t, c.LoadConfigData(cfg, EmptySourcePath))
	require.Len(t, c.Inputs, 1)

	// Create a mockup secretstore
	store := &MockupSecretStore{
		Secrets: map[string][]byte{"secret": []byte("Ood Bnar")},
		Dynamic: true,
	}
	require.NoError(t, store.Init())
	c.SecretStores["mock"] = store
	require.NoError(t, c.LinkSecrets())

	plugin := c.Inputs[0].Input.(*MockupSecretPlugin)

	secret, err := plugin.Secret.Get()
	require.NoError(t, err)
	defer secret.Destroy()
	require.EqualValues(t, "Ood Bnar", secret.TemporaryString())

	// The "@{mock:secret}" reference in the new value must be resolved on Get.
	require.NoError(t, plugin.Secret.Set([]byte("@{mock:secret} is cool")))
	newsecret, err := plugin.Secret.Get()
	require.NoError(t, err)
	defer newsecret.Destroy()
	require.EqualValues(t, "Ood Bnar is cool", newsecret.TemporaryString())
}
// TestSecretSetResolveInvalid ensures that Set rejects new secret values that
// reference keys unknown to the linked secret-stores.
func (tsuite *SecretImplTestSuite) TestSecretSetResolveInvalid() {
	t := tsuite.T()
	cfg := []byte(`
[[inputs.mockup]]
secret = "@{mock:secret}"
`)
	c := NewConfig()
	require.NoError(t, c.LoadConfigData(cfg, EmptySourcePath))
	require.Len(t, c.Inputs, 1)

	// Create a mockup secretstore
	store := &MockupSecretStore{
		Secrets: map[string][]byte{"secret": []byte("Ood Bnar")},
		Dynamic: true,
	}
	require.NoError(t, store.Init())
	c.SecretStores["mock"] = store
	require.NoError(t, c.LinkSecrets())

	plugin := c.Inputs[0].Input.(*MockupSecretPlugin)

	secret, err := plugin.Secret.Get()
	require.NoError(t, err)
	defer secret.Destroy()
	require.EqualValues(t, "Ood Bnar", secret.TemporaryString())

	// Setting a value referencing a key the store does not contain must fail.
	err = plugin.Secret.Set([]byte("@{mock:another_secret}"))
	require.ErrorContains(t, err, `linking new secrets failed: unlinked part "@{mock:another_secret}"`)
}
// TestSecretInvalidWarn checks that loading a config warns about secret
// references containing invalid characters while valid references are kept
// out of the log output.
func (tsuite *SecretImplTestSuite) TestSecretInvalidWarn() {
	t := tsuite.T()

	// Intercept the log output
	var buf bytes.Buffer
	backup := log.Writer()
	log.SetOutput(&buf)
	defer log.SetOutput(backup)

	cfg := []byte(`
[[inputs.mockup]]
secret = "server=a user=@{mock:secret-with-invalid-chars} pass=@{mock:secret_pass}"
`)
	c := NewConfig()
	require.NoError(t, c.LoadConfigData(cfg, EmptySourcePath))
	require.Len(t, c.Inputs, 1)

	// Only the reference with invalid characters should be warned about.
	require.Contains(t, buf.String(), `W! Secret "@{mock:secret-with-invalid-chars}" contains invalid character(s)`)
	require.NotContains(t, buf.String(), "@{mock:secret_pass}")
}
// TestSecretImplUnprotected exercises the unprotected secret implementation
// directly: container creation stores a copy of the secret, and the buffer
// accessors all expose the same content.
func TestSecretImplUnprotected(t *testing.T) {
	impl := &unprotectedSecretImpl{}

	container := impl.Container([]byte("foobar"))
	require.NotNil(t, container)
	c, ok := container.(*unprotectedSecretContainer)
	require.True(t, ok)
	require.Equal(t, "foobar", string(c.buf.content))

	buf, err := container.Buffer()
	require.NoError(t, err)
	require.NotNil(t, buf)
	require.Equal(t, []byte("foobar"), buf.Bytes())
	require.Equal(t, "foobar", buf.TemporaryString())
	require.Equal(t, "foobar", buf.String())
}
// TestSecretImplTestSuiteUnprotected runs the suite against the unprotected
// (plain-memory) secret implementation.
func TestSecretImplTestSuiteUnprotected(t *testing.T) {
	suite.Run(t, &SecretImplTestSuite{protected: false})
}

// TestSecretImplTestSuiteProtected runs the suite against the protected
// secret implementation.
func TestSecretImplTestSuiteProtected(t *testing.T) {
	suite.Run(t, &SecretImplTestSuite{protected: true})
}
// MockupSecretPlugin is a mockup (input) plugin for testing to avoid cyclic
// dependencies. It only carries a secret and an expected comparison value.
type MockupSecretPlugin struct {
	Secret   Secret `toml:"secret"`
	Expected string `toml:"expected"`
}

// SampleConfig returns the sample configuration of the mockup plugin.
func (*MockupSecretPlugin) SampleConfig() string { return "Mockup test secret plugin" }

// Gather implements telegraf.Input; the mockup collects nothing.
func (*MockupSecretPlugin) Gather(_ telegraf.Accumulator) error { return nil }
// MockupSecretStore is an in-memory secret-store implementation for testing.
type MockupSecretStore struct {
	// Secrets maps secret keys to their raw byte values.
	Secrets map[string][]byte
	// Dynamic marks the store's secrets as changeable at runtime when true.
	Dynamic bool
}

// Init implements the initializer interface; nothing to set up for the mockup.
func (*MockupSecretStore) Init() error {
	return nil
}

// SampleConfig returns the sample configuration of the mockup store.
func (*MockupSecretStore) SampleConfig() string {
	return "Mockup test secret plugin"
}
// Get returns the secret stored under the given key or an error if the key
// does not exist.
func (s *MockupSecretStore) Get(key string) ([]byte, error) {
	if value, found := s.Secrets[key]; found {
		return value, nil
	}
	return nil, errors.New("not found")
}
// Set stores the given value under key, overwriting any existing entry.
func (s *MockupSecretStore) Set(key, value string) error {
	s.Secrets[key] = []byte(value)
	return nil
}
// List returns the keys of all secrets currently held by the store.
// Note: map iteration order is unspecified, so the result is unordered.
func (s *MockupSecretStore) List() ([]string, error) {
	names := make([]string, 0, len(s.Secrets))
	for name := range s.Secrets {
		names = append(names, name)
	}
	return names, nil
}
// GetResolver returns a resolver function for the given key. The returned
// function looks the key up on every invocation and reports the store's
// Dynamic flag so callers know whether the value may change between calls.
func (s *MockupSecretStore) GetResolver(key string) (telegraf.ResolveFunc, error) {
	return func() ([]byte, bool, error) {
		v, err := s.Get(key)
		return v, s.Dynamic, err
	}, nil
}
// Register the mockup plugin on loading
func init() {
	// Register the mockup input plugin for the required names
	inputs.Add("mockup", func() telegraf.Input { return &MockupSecretPlugin{} })
	// Register the mockup secret-store under the same name used by the tests
	secretstores.Add("mockup", func(string) telegraf.SecretStore {
		return &MockupSecretStore{}
	})
}

View file

@ -0,0 +1,94 @@
package config
import (
"bytes"
"unsafe"
)
// unprotectedSecretImpl is the secret implementation that keeps secrets in
// ordinary (non-locked) process memory.
type unprotectedSecretImpl struct{}

// Container wraps the given secret in an unprotected container holding an
// independent copy of the data.
func (*unprotectedSecretImpl) Container(secret []byte) secretContainer {
	return &unprotectedSecretContainer{buf: newUnlockedBuffer(secret)}
}

// EmptyBuffer returns a new, empty secret buffer.
func (*unprotectedSecretImpl) EmptyBuffer() SecretBuffer {
	return &unlockedBuffer{}
}

// Wipe overwrites the given bytes with zeros to remove the secret content
// from memory.
func (*unprotectedSecretImpl) Wipe(secret []byte) {
	for i := range secret {
		secret[i] = 0
	}
}
// unlockedBuffer implements SecretBuffer using plain (non-locked) process
// memory for the unprotected secret implementation.
type unlockedBuffer struct {
	content []byte
}

// newUnlockedBuffer creates a buffer holding an independent copy of secret.
func newUnlockedBuffer(secret []byte) *unlockedBuffer {
	return &unlockedBuffer{bytes.Clone(secret)}
}

// Size returns the number of bytes currently stored in the buffer.
func (lb *unlockedBuffer) Size() int {
	return len(lb.content)
}

// Grow is a no-op.
func (*unlockedBuffer) Grow(int) {
	// The underlying byte-buffer will grow dynamically
}

// Bytes returns the buffer content without copying. The returned slice
// shares memory with the buffer and is invalidated by Destroy.
func (lb *unlockedBuffer) Bytes() []byte {
	return lb.content
}

// TemporaryString returns the content as a string without copying the
// underlying bytes. The string becomes invalid once the buffer is destroyed.
func (lb *unlockedBuffer) TemporaryString() string {
	// Guard against empty buffers (e.g. from EmptyBuffer()): taking the
	// address of element zero of an empty slice would panic.
	if len(lb.content) == 0 {
		return ""
	}
	//nolint:gosec // G103: Valid use of unsafe call to cast underlying bytes to string
	return unsafe.String(&lb.content[0], len(lb.content))
}

// String returns an independent copy of the content as a string.
func (lb *unlockedBuffer) String() string {
	return string(lb.content)
}
// Destroy wipes the buffer content from memory and releases the underlying
// storage; the buffer must not be used afterwards.
func (lb *unlockedBuffer) Destroy() {
	selectedImpl.Wipe(lb.content)
	lb.content = nil
}
// unprotectedSecretContainer stores a secret in an unlocked memory buffer.
type unprotectedSecretContainer struct {
	// buf holds the secret content; nil after Destroy.
	buf *unlockedBuffer
}

// Destroy wipes the contained secret from memory and invalidates the
// container. Destroying an already-destroyed container is a no-op.
func (c *unprotectedSecretContainer) Destroy() {
	if c.buf == nil {
		return
	}

	// Wipe the secret from memory
	c.buf.Destroy()
	c.buf = nil
}

// Equals reports whether the contained secret matches ref byte-for-byte.
// A destroyed container never matches.
func (c *unprotectedSecretContainer) Equals(ref []byte) (bool, error) {
	if c.buf == nil {
		return false, nil
	}

	return bytes.Equal(c.buf.content, ref), nil
}

// Buffer returns an independent copy of the contained secret. A destroyed
// container yields a fresh empty buffer.
func (c *unprotectedSecretContainer) Buffer() (SecretBuffer, error) {
	if c.buf == nil {
		return &unlockedBuffer{}, nil
	}

	return newUnlockedBuffer(c.buf.content), nil
}

// AsBuffer wraps the given bytes in a buffer WITHOUT copying; the caller
// retains ownership of the slice.
func (*unprotectedSecretContainer) AsBuffer(secret []byte) SecretBuffer {
	return &unlockedBuffer{secret}
}

// Replace swaps the contained secret with an independent copy of the given
// bytes. NOTE(review): the previous buffer is not wiped here — presumably the
// caller destroys it; confirm.
func (c *unprotectedSecretContainer) Replace(secret []byte) {
	c.buf = newUnlockedBuffer(secret)
}

28
config/testdata/addressbook.proto vendored Normal file
View file

@ -0,0 +1,28 @@
syntax = "proto3";
package addressbook;
message Person {
string name = 1;
int32 id = 2; // Unique ID number for this person.
string email = 3;
uint32 age = 4;
enum PhoneType {
MOBILE = 0;
HOME = 1;
WORK = 2;
}
message PhoneNumber {
string number = 1;
PhoneType type = 2;
}
repeated PhoneNumber phones = 5;
}
message AddressBook {
repeated Person people = 1;
repeated string tags = 2;
}

4
config/testdata/azure_monitor.toml vendored Normal file
View file

@ -0,0 +1,4 @@
[[outputs.azure_monitor]]
[[outputs.azure_monitor]]
namespace_prefix = ""

2
config/testdata/default_parser.toml vendored Normal file
View file

@ -0,0 +1,2 @@
[[inputs.file]]
files = ["metrics"]

View file

@ -0,0 +1,2 @@
[[inputs.exec]]
command = '/usr/bin/echo {"value": 42}'

View file

@ -0,0 +1,8 @@
[[inputs.file]]
pass = ["foo"]
fieldpass = ["bar"]
fieldinclude = ["baz"]
drop = ["foo"]
fielddrop = ["bar"]
fieldexclude = ["baz"]

99
config/testdata/envvar_comments.toml vendored Normal file
View file

@ -0,0 +1,99 @@
# Telegraf Configuration
#
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
#
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
#
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
#
# Environment variables can be used anywhere in this config file, simply surround
# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"),
# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR})
[global_tags]
[agent]
interval = "10s"
round_interval = true
metric_batch_size = 1000
metric_buffer_limit = 10000
collection_jitter = "0s"
flush_interval = '10s'
flush_jitter = "0s"
precision = ""
hostname = ''
omit_hostname = false
[[outputs.influxdb]]
setting1 = '#'#test
setting2 = '''#'''#test
setting3 = "#"#test
setting4 = """#"""#test
wicked1 = "\""#test
wicked2 = """\""""#test
[[inputs.cpu]]
percpu = true
#totalcpu = true
# collect_cpu_time = false
## report_active = false
[[a.plugin]]
mylist = [
"value 1", # a good value
"value 2", # a better value
"value 3", "value 4",
'value5', """tagwith#value""",
] # Should work
[[some.stuff]]
a = 'not a #comment'
b = '''not a #comment'''
c = "not a #comment"
d = """not a #comment"""
e = '''not a #comment containing "quotes"'''
f = '''not a #comment containing 'quotes'?'''
g = """not a #comment containing "quotes"?"""
# Issue #14237
[[inputs.myplugin]]
value = '''This isn't a #comment.'''
[[processors.starlark]]
script = """
# Drop fields if they contain a string.
#
# Example Input:
# measurement,host=hostname a=1,b="somestring" 1597255410000000000
#
# Example Output:
# measurement,host=hostname a=1 1597255410000000000
def apply(metric):
for k, v in metric.fields.items():
if type(v) == "string":
metric.fields.pop(k)
return metric
"""
[[processors.starlark]]
script = '''
# Drop fields if they contain a string.
#
# Example Input:
# measurement,host=hostname a=1,b="somestring" 1597255410000000000
#
# Example Output:
# measurement,host=hostname a=1 1597255410000000000
def apply(metric):
for k, v in metric.fields.items():
if type(v) == "string":
metric.fields.pop(k)
return metric
'''

View file

@ -0,0 +1,99 @@
[global_tags]
[agent]
interval = "10s"
round_interval = true
metric_batch_size = 1000
metric_buffer_limit = 10000
collection_jitter = "0s"
flush_interval = '10s'
flush_jitter = "0s"
precision = ""
hostname = ''
omit_hostname = false
[[outputs.influxdb]]
setting1 = '#'
setting2 = '''#'''
setting3 = "#"
setting4 = """#"""
wicked1 = "\""
wicked2 = """\""""
[[inputs.cpu]]
percpu = true
[[a.plugin]]
mylist = [
"value 1",
"value 2",
"value 3", "value 4",
'value5', """tagwith#value""",
]
[[some.stuff]]
a = 'not a #comment'
b = '''not a #comment'''
c = "not a #comment"
d = """not a #comment"""
e = '''not a #comment containing "quotes"'''
f = '''not a #comment containing 'quotes'?'''
g = """not a #comment containing "quotes"?"""
[[inputs.myplugin]]
value = '''This isn't a #comment.'''
[[processors.starlark]]
script = """
# Drop fields if they contain a string.
#
# Example Input:
# measurement,host=hostname a=1,b="somestring" 1597255410000000000
#
# Example Output:
# measurement,host=hostname a=1 1597255410000000000
def apply(metric):
for k, v in metric.fields.items():
if type(v) == "string":
metric.fields.pop(k)
return metric
"""
[[processors.starlark]]
script = '''
# Drop fields if they contain a string.
#
# Example Input:
# measurement,host=hostname a=1,b="somestring" 1597255410000000000
#
# Example Output:
# measurement,host=hostname a=1 1597255410000000000
def apply(metric):
for k, v in metric.fields.items():
if type(v) == "string":
metric.fields.pop(k)
return metric
'''

View file

@ -0,0 +1,2 @@
[[processors.processor]]
metricpass = '("state" in tags && tags.state == "on") || time > timestamp("2023-04-24T00:00:00Z")'

7
config/testdata/inline_table.toml vendored Normal file
View file

@ -0,0 +1,7 @@
[[outputs.http]]
headers = { Authorization = "Token $TOKEN",Content-Type = "application/json" }
taginclude = ["org_id"]
[[outputs.http]]
headers = { Authorization = "Token $TOKEN",Content-Type = "application/json" }
taginclude = ["org_id"]

2
config/testdata/invalid_field.toml vendored Normal file
View file

@ -0,0 +1,2 @@
[[inputs.http_listener_v2]]
not_a_field = true

View file

@ -0,0 +1,5 @@
[[inputs.parser]]
data_format = "xpath_json"
[[inputs.parser.xpath]]
not_a_field = true

Some files were not shown because too many files have changed in this diff Show more