1
0
Fork 0

Adding upstream version 1.34.4.

Signed-off-by: Daniel Baumann <daniel@debian.org>
This commit is contained in:
Daniel Baumann 2025-05-24 07:26:29 +02:00
parent e393c3af3f
commit 4978089aab
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
4963 changed files with 677545 additions and 0 deletions

View file

@ -0,0 +1,142 @@
package main
import (
"bytes"
"fmt"
"log"
"os"
"path/filepath"
"strings"
"text/template"
"text/template/parse"
)
func extractIncludes(tmpl *template.Template) []string {
var includes []string
for _, node := range tmpl.Root.Nodes {
if n, ok := node.(*parse.TemplateNode); ok {
includes = append(includes, n.Name)
}
}
return includes
}
// absolutePath converts the location of fn into a path relative to root,
// returned with a leading path separator so it can be used as an
// "absolute" include path within the project tree.
func absolutePath(root, fn string) (string, error) {
	abs, err := filepath.Abs(fn)
	if err != nil {
		return "", fmt.Errorf("cannot determine absolute location of %q: %w", fn, err)
	}
	rel, err := filepath.Rel(root, filepath.Dir(abs))
	if err != nil {
		return "", fmt.Errorf("cannot determine location of %q relative to %q: %w", abs, root, err)
	}
	return string(filepath.Separator) + rel, nil
}
}
// main renders an output file (by default "sample.conf") from a template
// (by default "sample.conf.in"), resolving {{template "..."}} includes
// against the Telegraf source tree. Include names starting with "/" are
// resolved relative to the repository root, all others relative to the
// file containing the include.
//
// Arguments (all optional):
//
//	1st: plugin "parent" name (derived from the working directory if absent)
//	2nd: input template filename (must end in ".in")
//	3rd: output filename (derived by stripping ".in" if absent)
func main() {
	// Estimate Telegraf root to be able to handle absolute paths
	cwd, err := os.Getwd()
	if err != nil {
		log.Fatalf("Cannot get working directory: %v", err)
	}
	cwd, err = filepath.Abs(cwd)
	if err != nil {
		log.Fatalf("Cannot resolve working directory: %v", err)
	}

	// The repository root is everything before the "/plugins/" element of
	// the working directory.
	var root string
	idx := strings.LastIndex(cwd, filepath.FromSlash("/plugins/"))
	if idx <= 0 {
		log.Fatalln("Cannot determine include root!")
	}
	root = cwd[:idx]

	var parent, inputFilename, outputFilename string
	switch len(os.Args) {
	case 1:
		// No arguments: derive the plugin name (e.g. "inputs.mem") from
		// the path below "plugins/".
		parent = strings.TrimPrefix(filepath.ToSlash(cwd[idx:]), "/plugins/")
		parent = strings.ReplaceAll(parent, "/", ".")
		inputFilename = "sample.conf.in"
		outputFilename = "sample.conf"
	case 2:
		// Parent name given explicitly, default filenames.
		parent = os.Args[1]
		inputFilename = "sample.conf.in"
		outputFilename = "sample.conf"
	case 3:
		// Parent and template given; the output name is derived by
		// stripping the mandatory ".in" suffix.
		parent = os.Args[1]
		inputFilename = os.Args[2]
		if !strings.HasSuffix(inputFilename, ".in") {
			log.Fatalf("Template filename %q does not have '.in' suffix!", inputFilename)
		}
		outputFilename = strings.TrimSuffix(inputFilename, ".in")
	case 4:
		parent = os.Args[1]
		inputFilename = os.Args[2]
		outputFilename = os.Args[3]
	default:
		log.Fatalln("Invalid number of arguments")
	}

	// Breadth-first resolution of the include graph: parse each not-yet-known
	// template, register its parse tree on the root template and queue any
	// includes it references for the next round.
	roottmpl := template.New(inputFilename)
	known := make(map[string]bool)
	inroot, err := absolutePath(root, inputFilename)
	if err != nil {
		log.Fatal(err)
	}
	unresolved := map[string]string{inputFilename: filepath.Join(inroot, inputFilename)}
	for {
		if len(unresolved) == 0 {
			break
		}
		newUnresolved := make(map[string]string)
		for name, fn := range unresolved {
			// Include locations starting with "/" are relative to the
			// repository root.
			if strings.HasPrefix(filepath.ToSlash(fn), "/") {
				fn = filepath.Join(root, fn)
			}
			if known[name] {
				// Include already resolved, skipping
				continue
			}
			tmpl, err := template.ParseFiles(fn)
			if err != nil {
				log.Fatalf("Reading template %q failed: %v", fn, err)
			}
			known[name] = true
			if _, err := roottmpl.AddParseTree(name, tmpl.Tree); err != nil {
				log.Fatalf("Adding include %q failed: %v", fn, err)
			}
			// For relative paths we need to make it relative to the include
			pwd, err := filepath.Abs(fn)
			if err != nil {
				log.Fatalf("Cannot determine absolute location of %q: %v", fn, err)
			}
			pwd, err = filepath.Rel(root, filepath.Dir(pwd))
			if err != nil {
				log.Fatalf("Cannot determine location of %q relative to %q: %v", pwd, root, err)
			}
			pwd = string(filepath.Separator) + pwd
			for _, iname := range extractIncludes(tmpl) {
				if !strings.HasPrefix(iname, "/") {
					newUnresolved[iname] = filepath.Join(pwd, iname)
				} else {
					newUnresolved[iname] = iname
				}
			}
		}
		unresolved = newUnresolved
	}

	// Render the fully resolved template and write the result.
	defines := map[string]string{"parent": parent}
	var buf bytes.Buffer
	if err := roottmpl.Execute(&buf, defines); err != nil {
		log.Fatalf("Executing template failed: %v", err)
	}
	if err := os.WriteFile(outputFilename, buf.Bytes(), 0640); err != nil {
		log.Fatalf("Writing output %q failed: %v", outputFilename, err)
	}
}

View file

@ -0,0 +1,102 @@
# Telegraf customization tool
Telegraf's `custom_builder` is a tool to select the plugins compiled into the
Telegraf binary. By doing so, Telegraf can become smaller, saving both disk
space and memory if only a sub-set of plugins is selected.
## Requirements
For compiling the customized binary you need the
[Golang language](https://go.dev/) as well as the `make` build system.
The minimum required version of Golang can be found in the *Build From Source*
section of the `README.md` file of your version. Both the `go` and the `make`
command must be available in your path.
## Downloading code
The first step is to download the Telegraf repository for the version you are
planning to customize. In the example below, we want to use `v1.29.5` but you
might also use other versions or `master`.
```shell
# git clone --branch v1.29.5 --single-branch https://github.com/influxdata/telegraf.git
...
# cd telegraf
```
Alternatively, you can download the source tarball or zip-archive of a
[Telegraf release](https://github.com/influxdata/telegraf/releases).
## Building
To build `custom_builder` run the following command:
```shell
# make build_tools
```
The resulting binary is located in the `tools/custom_builder` folder.
## Running
The easiest way of building a customized Telegraf is to use your Telegraf
configuration file(s). Assuming your configuration is in
`/etc/telegraf/telegraf.conf` you can run
```shell
# ./tools/custom_builder/custom_builder --config /etc/telegraf/telegraf.conf
```
to build a Telegraf binary tailored to your configuration. You can also specify
a configuration directory similar to Telegraf itself. To additionally use the
configurations in `/etc/telegraf/telegraf.d` run
```shell
# ./tools/custom_builder/custom_builder \
--config /etc/telegraf/telegraf.conf \
--config-dir /etc/telegraf/telegraf.d
```
Configurations can also be retrieved from remote locations just like for
Telegraf.
```shell
# ./tools/custom_builder/custom_builder --config http://myserver/telegraf.conf
```
will download the configuration from `myserver`.
The `--config` and `--config-dir` options can be used multiple times. In case
you want to deploy Telegraf to multiple systems with different configurations,
simply specify the super-set of all configurations you have. `custom_builder`
will figure out the list for you.
```shell
# ./tools/custom_builder/custom_builder \
--config system1/telegraf.conf \
--config system2/telegraf.conf \
--config ... \
--config systemN/telegraf.conf \
--config-dir system1/telegraf.d \
--config-dir system2/telegraf.d \
--config-dir ... \
--config-dir systemN/telegraf.d
```
The Telegraf customization uses
[Golang's build-tags](https://pkg.go.dev/go/build#hdr-Build_Constraints) to
select the set of plugins. To see which tags are set use the `--tags` flag.
To get more help run
```shell
# ./tools/custom_builder/custom_builder --help
```
## Notes
Please make sure to include all `parsers` and `serializers` you intend to use
and check the enabled-plugins list.
Additional plugins can potentially be enabled automatically due to dependencies
without being shown in the enabled-plugins list.

View file

@ -0,0 +1,232 @@
package main
import (
"bytes"
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/influxdata/toml"
"github.com/influxdata/toml/ast"
"github.com/influxdata/telegraf/config"
)
// instance describes a single configured plugin instance found in a
// Telegraf configuration file.
type instance struct {
	category   string   // plugin category, e.g. "inputs" or "outputs"
	name       string   // plugin name within the category
	enabled    bool     // always set to true for instances found in a config
	dataformat []string // values of any "data_format" options of this instance
}

// selection holds all plugin instances collected from the configuration
// files, keyed by "<category>.<name>".
type selection struct {
	plugins map[string][]instance
}
// ImportConfigurations reads the given configuration files as well as all
// "*.conf" files found directly in the given directories and collects the
// configured plugin instances. It returns the resulting selection, the
// number of processed files and a potential error.
func ImportConfigurations(files, dirs []string) (*selection, int, error) {
	sel := &selection{plugins: make(map[string][]instance)}

	// Collect the explicitly given files plus all *.conf files from the
	// given directories (non-recursive).
	filenames := append([]string{}, files...)
	for _, dir := range dirs {
		entries, err := os.ReadDir(dir)
		if err != nil {
			return nil, 0, fmt.Errorf("reading directory %q failed: %w", dir, err)
		}
		for _, entry := range entries {
			if entry.IsDir() || filepath.Ext(entry.Name()) != ".conf" {
				continue
			}
			filenames = append(filenames, filepath.Join(dir, entry.Name()))
		}
	}
	if len(filenames) == 0 {
		return sel, 0, errors.New("no configuration files given or found")
	}

	// Do the actual import
	err := sel.importFiles(filenames)
	return sel, len(filenames), err
}
// Filter reduces the collection of available packages to those required by
// the selection: packages explicitly configured plus parsers/serializers
// pulled in implicitly via "data_format" settings or data-format defaults.
// It returns an error if the configuration references unknown packages.
func (s *selection) Filter(p packageCollection) (*packageCollection, error) {
	enabled := packageCollection{
		packages: make(map[string][]packageInfo),
	}

	// Tracks parser/serializer packages required indirectly, keyed by
	// "<category>.<plugin>".
	implicitlyConfigured := make(map[string]bool)
	for category, pkgs := range p.packages {
		for _, pkg := range pkgs {
			key := category + "." + pkg.Plugin
			instances, found := s.plugins[key]
			if !found {
				continue
			}

			// The package was configured so add it to the enabled list
			enabled.packages[category] = append(enabled.packages[category], pkg)

			// Check if the instances configured a data-format and decide if it
			// is a parser or serializer depending on the plugin type.
			// If no data-format was configured, check the default settings in
			// case this plugin supports a data-format setting but the user
			// didn't set it.
			for _, instance := range instances {
				for _, dataformat := range instance.dataformat {
					switch category {
					case "inputs":
						implicitlyConfigured["parsers."+dataformat] = true
					case "processors":
						implicitlyConfigured["parsers."+dataformat] = true
						// The execd processor requires both a parser and serializer
						if pkg.Plugin == "execd" {
							implicitlyConfigured["serializers."+dataformat] = true
						}
					case "outputs":
						implicitlyConfigured["serializers."+dataformat] = true
					}
				}
				if len(instance.dataformat) == 0 {
					if pkg.DefaultParser != "" {
						implicitlyConfigured["parsers."+pkg.DefaultParser] = true
					}
					if pkg.DefaultSerializer != "" {
						implicitlyConfigured["serializers."+pkg.DefaultSerializer] = true
					}
				}
			}
		}
	}

	// Iterate over all plugins AGAIN to add the implicitly configured packages
	// such as parsers and serializers
	for category, pkgs := range p.packages {
		for _, pkg := range pkgs {
			key := category + "." + pkg.Plugin

			// Skip the plugins that were explicitly configured as we already
			// added them above.
			if _, found := s.plugins[key]; found {
				continue
			}

			// Add the package if it was implicitly configured e.g. by a
			// 'data_format' setting or by a default value for the data-format
			if _, implicit := implicitlyConfigured[key]; implicit {
				enabled.packages[category] = append(enabled.packages[category], pkg)
			}
		}
	}

	// Check if all packages in the config were covered
	available := make(map[string]bool)
	for category, pkgs := range p.packages {
		for _, pkg := range pkgs {
			available[category+"."+pkg.Plugin] = true
		}
	}

	var unknown []string
	for pkg := range s.plugins {
		if !available[pkg] {
			unknown = append(unknown, pkg)
		}
	}
	for pkg := range implicitlyConfigured {
		if !available[pkg] {
			unknown = append(unknown, pkg)
		}
	}
	if len(unknown) > 0 {
		return nil, fmt.Errorf("configured but unknown packages %q", strings.Join(unknown, ","))
	}

	return &enabled, nil
}
// importFiles loads each of the given configuration files (local or
// remote) and extracts the plugin instances defined therein.
func (s *selection) importFiles(configurations []string) error {
	for _, fn := range configurations {
		data, _, err := config.LoadConfigFile(fn)
		if err != nil {
			return fmt.Errorf("reading %q failed: %w", fn, err)
		}
		if err := s.extractPluginsFromConfig(data); err != nil {
			return fmt.Errorf("extracting plugins from %q failed: %w", fn, err)
		}
	}
	return nil
}
// extractPluginsFromConfig parses the given raw TOML configuration and
// records every plugin instance of a known category in the selection.
// For each instance all "data_format" option values are collected to
// later determine the required parser/serializer packages.
func (s *selection) extractPluginsFromConfig(buf []byte) error {
	table, err := toml.Parse(trimBOM(buf))
	if err != nil {
		return fmt.Errorf("parsing TOML failed: %w", err)
	}

	for category, subtbl := range table.Fields {
		// Check if we should handle the category, i.e. it contains plugins
		// to configure.
		var valid bool
		for _, c := range categories {
			if c == category {
				valid = true
				break
			}
		}
		if !valid {
			continue
		}

		categoryTbl, ok := subtbl.(*ast.Table)
		if !ok {
			continue
		}

		for name, data := range categoryTbl.Fields {
			key := category + "." + name
			cfg := instance{
				category: category,
				name:     name,
				enabled:  true,
			}

			// We need to check the data_format field to get all required
			// parsers and serializers
			pluginTables, ok := data.([]*ast.Table)
			if ok {
				for _, subsubtbl := range pluginTables {
					var dataformat string
					for field, fieldData := range subsubtbl.Fields {
						if field != "data_format" {
							continue
						}
						// Only accept string-valued entries; using checked
						// type assertions avoids panicking on malformed
						// configurations (the previous code asserted
						// unconditionally).
						kv, ok := fieldData.(*ast.KeyValue)
						if !ok {
							continue
						}
						option, ok := kv.Value.(*ast.String)
						if !ok {
							continue
						}
						dataformat = option.Value
					}
					if dataformat != "" {
						cfg.dataformat = append(cfg.dataformat, dataformat)
					}
				}
			}
			s.plugins[key] = append(s.plugins[key], cfg)
		}
	}
	return nil
}
// trimBOM removes a leading UTF-8 byte-order mark from f, if present.
func trimBOM(f []byte) []byte {
	const bom = "\xef\xbb\xbf"
	return bytes.TrimPrefix(f, []byte(bom))
}

View file

@ -0,0 +1,178 @@
package main
import (
"bytes"
"errors"
"flag"
"fmt"
"log"
"os"
"os/exec"
"strings"
)
// buildTargets are the make targets invoked to build the customized
// Telegraf binary.
var buildTargets = []string{"build"}

// categories lists all plugin categories considered when scanning both
// the configuration files and the plugin source tree.
var categories = []string{
	"aggregators",
	"inputs",
	"outputs",
	"parsers",
	"processors",
	"secretstores",
	"serializers",
}
// description is the general help text shown as part of the usage output.
// The wording of the original text contained several grammar issues
// ("a tool build Telegraf", "are select") which are fixed here.
const description = `
This is a tool to build Telegraf with a custom set of plugins. The plugins
are selected according to the specified Telegraf configuration files. This
allows you to shrink the binary size by only selecting the plugins you really
need.
A more detailed documentation is available at
http://github.com/influxdata/telegraf/tools/custom_builder/README.md
`

// examples lists typical invocations shown as part of the usage output.
const examples = `
The following command will customize Telegraf to fit the configuration found
at the default locations
  custom_builder --config /etc/telegraf/telegraf.conf --config-dir /etc/telegraf/telegraf.d
You can use the --config and --config-dir options multiple times
  custom_builder --config global.conf --config myinputs.conf --config myoutputs.conf
or use one or more remote address(es) to load the config
  custom_builder --config global.conf --config http://myserver/plugins.conf
Combinations of local and remote config as well as config directories are
possible.
`
// usage prints the tool description, flag summary and usage examples to
// the output configured for the default flag set.
func usage() {
	out := flag.CommandLine.Output()
	fmt.Fprint(out, description)
	fmt.Fprintln(out, "")
	fmt.Fprintln(out, "Usage:")
	fmt.Fprintln(out, " custom_builder [flags]")
	fmt.Fprintln(out, "")
	fmt.Fprintln(out, "Flags:")
	flag.PrintDefaults()
	fmt.Fprintln(out, "")
	fmt.Fprintln(out, "Examples:")
	fmt.Fprint(out, examples)
	fmt.Fprintln(out, "")
}
// cmdConfig holds all command-line options of the custom_builder tool.
type cmdConfig struct {
	dryrun      bool     // skip the actual "make" build step
	showtags    bool     // print the computed build tags
	migrations  bool     // additionally enable the "migrations" build tag
	quiet       bool     // print fewer log messages
	root        string   // root directory of the Telegraf source tree
	configFiles []string // configuration files to import plugins from
	configDirs  []string // directories scanned for *.conf files
}
// main parses the command-line flags, determines the build tags for the
// configured plugins and, unless in dry-run mode, runs "make" with the
// corresponding BUILDTAGS environment variable set.
func main() {
	var cfg cmdConfig
	flag.Func("config",
		"Import plugins from configuration file (can be used multiple times)",
		func(s string) error {
			cfg.configFiles = append(cfg.configFiles, s)
			return nil
		},
	)
	flag.Func("config-dir",
		"Import plugins from configs in the given directory (can be used multiple times)",
		func(s string) error {
			cfg.configDirs = append(cfg.configDirs, s)
			return nil
		},
	)
	flag.BoolVar(&cfg.dryrun, "dry-run", false, "Skip the actual building step")
	flag.BoolVar(&cfg.quiet, "quiet", false, "Print fewer log messages")
	flag.BoolVar(&cfg.migrations, "migrations", false, "Include configuration migrations")
	flag.BoolVar(&cfg.showtags, "tags", false, "Show build-tags used")
	flag.Usage = usage
	flag.Parse()

	// Determine the required build tags from the configuration(s).
	tagset, err := process(&cfg)
	if err != nil {
		log.Fatalln(err)
	}
	if len(tagset) == 0 {
		log.Fatalln("Nothing selected!")
	}
	// Assemble the comma-separated build-tag list; "custom" is always
	// included, "migrations" only on request.
	tags := "custom,"
	if cfg.migrations {
		tags += "migrations,"
	}
	tags += strings.Join(tagset, ",")
	if cfg.showtags {
		fmt.Printf("Build tags: %s\n", tags)
	}

	if !cfg.dryrun {
		// Perform the build
		var out bytes.Buffer
		makeCmd := exec.Command("make", buildTargets...)
		makeCmd.Env = append(os.Environ(), "BUILDTAGS="+tags)
		makeCmd.Stdout = &out
		makeCmd.Stderr = &out

		if !cfg.quiet {
			log.Println("Running build...")
		}
		// Combined stdout/stderr is printed on failure (and on success
		// unless running quiet).
		if err := makeCmd.Run(); err != nil {
			fmt.Println(out.String())
			log.Fatalf("Running make failed: %v", err)
		}
		if !cfg.quiet {
			fmt.Println(out.String())
		}
	} else if !cfg.quiet {
		log.Println("DRY-RUN: Skipping build.")
	}
}
// process collects the available plugins, imports the given
// configurations and returns the build tags required to compile a
// Telegraf binary containing exactly the configured plugins.
func process(cmdcfg *cmdConfig) ([]string, error) {
	// Check configuration options
	if len(cmdcfg.configFiles)+len(cmdcfg.configDirs) == 0 {
		return nil, errors.New("no configuration specified")
	}

	// Collect all available plugins
	packages := packageCollection{root: cmdcfg.root}
	if err := packages.CollectAvailable(); err != nil {
		return nil, fmt.Errorf("collecting plugins failed: %w", err)
	}

	// Import the plugin list from Telegraf configuration files
	log.Println("Importing configuration file(s)...")
	cfg, nfiles, err := ImportConfigurations(cmdcfg.configFiles, cmdcfg.configDirs)
	if err != nil {
		return nil, fmt.Errorf("importing configuration(s) failed: %w", err)
	}
	if !cmdcfg.quiet {
		log.Printf("Found %d configuration files...", nfiles)
	}
	// Check if we do have a config
	if nfiles == 0 {
		return nil, errors.New("no configuration files loaded")
	}

	// Keep only the packages that adhere to the filtering criteria of
	// the imported configuration.
	enabled, err := cfg.Filter(packages)
	if err != nil {
		return nil, fmt.Errorf("filtering packages failed: %w", err)
	}
	if !cmdcfg.quiet {
		enabled.Print()
	}

	// Extract the build-tags
	return enabled.ExtractTags(), nil
}

View file

@ -0,0 +1,56 @@
package main
import (
"bufio"
"io"
"log"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/require"
)
// TestCases runs the builder in dry-run mode against every directory in
// "testcases" and compares the resulting build tags with the expected
// ones stored in the "expected.tags" file of each testcase.
func TestCases(t *testing.T) {
	// Silence the output
	log.SetOutput(io.Discard)

	// Get all directories in the testcases folder
	folders, err := os.ReadDir("testcases")
	require.NoError(t, err)

	for _, f := range folders {
		// Only handle folders
		if !f.IsDir() {
			continue
		}
		configFilename := filepath.Join("testcases", f.Name(), "telegraf.conf")
		// Fixed typo in the variable name ("expeced" -> "expected").
		expectedFilename := filepath.Join("testcases", f.Name(), "expected.tags")

		t.Run(f.Name(), func(t *testing.T) {
			// Read the expected output
			file, err := os.Open(expectedFilename)
			require.NoError(t, err)
			defer file.Close()

			var expected []string
			scanner := bufio.NewScanner(file)
			for scanner.Scan() {
				expected = append(expected, scanner.Text())
			}
			require.NoError(t, scanner.Err())

			// Configure the command
			cfg := &cmdConfig{
				dryrun:      true,
				quiet:       true,
				configFiles: []string{configFilename},
				root:        "../..",
			}
			actual, err := process(cfg)
			require.NoError(t, err)
			require.EqualValues(t, expected, actual)
		})
	}
}

View file

@ -0,0 +1,345 @@
package main
import (
"bufio"
"errors"
"fmt"
"go/ast"
"go/parser"
"go/token"
"io/fs"
"log"
"os"
"path/filepath"
"regexp"
"sort"
"strings"
"github.com/influxdata/telegraf/filter"
)
// Define the categories we can handle and package filters
// packageFilter matches package paths that must never be treated as
// plugins, e.g. the "all" registration packages and test packages.
var packageFilter = filter.MustCompile([]string{
	"*/all",
	"*/*_test",
	"inputs/example",
	"inputs/main",
})

// packageInfo describes a single plugin package found in the source tree.
type packageInfo struct {
	Category          string // plugin category, e.g. "inputs"
	Plugin            string // registered plugin name
	Path              string // slash-separated package path below the repo root
	Tag               string // build tag enabling this package
	DefaultParser     string // data-format used when the config does not set one
	DefaultSerializer string // serializer used when the config does not set one
}

// packageCollection holds all plugin packages of the source tree below
// root, grouped by category.
type packageCollection struct {
	root     string
	packages map[string][]packageInfo
}

// Define the package exceptions
// exceptions lists packages that cannot be discovered by the regular
// directory scan and are therefore added explicitly.
var exceptions = map[string][]packageInfo{
	"parsers": {
		{
			Category: "parsers",
			Plugin:   "influx_upstream",
			Path:     "plugins/parsers/influx/influx_upstream",
			Tag:      "parsers.influx",
		},
	},
}
// collectPackagesForCategory scans the plugin source directory of the
// given category, extracts the plugin name(s) each package registers and
// appends the resulting package information (including any default
// data-format) to the collection under that category.
func (p *packageCollection) collectPackagesForCategory(category string) error {
	var entries []packageInfo
	pluginDir := filepath.Join(p.root, "plugins", category)

	// Add exceptional packages if any
	if pkgs, found := exceptions[category]; found {
		entries = append(entries, pkgs...)
	}

	// Walk the directory and get the packages
	elements, err := os.ReadDir(pluginDir)
	if err != nil {
		return err
	}

	for _, element := range elements {
		path := filepath.Join(pluginDir, element.Name())
		if !element.IsDir() {
			continue
		}

		// Parse the Go sources of the package directory, skipping test
		// files (see sourceFileFilter). Parse errors are logged and the
		// directory is skipped rather than aborting the whole scan.
		var fset token.FileSet
		pkgs, err := parser.ParseDir(&fset, path, sourceFileFilter, parser.ParseComments)
		if err != nil {
			log.Printf("parsing directory %q failed: %v", path, err)
			continue
		}

		for name, pkg := range pkgs {
			// Skip packages that are known non-plugins (e.g. "all").
			if packageFilter.Match(category + "/" + name) {
				continue
			}

			// Extract the names of the plugins registered by this package
			registeredNames := extractRegisteredNames(pkg, category)
			if len(registeredNames) == 0 {
				log.Printf("WARN: Could not extract information from package %q", name)
				continue
			}

			// Extract potential default parsers for input and processor packages
			// as well as serializers for the output package
			var defaultParser, defaultSerializer string
			switch category {
			case "inputs":
				dataformat, err := extractDefaultDataFormat(path)
				if err != nil {
					log.Printf("Getting default data-format for %s.%s failed: %v", category, name, err)
				}
				defaultParser = dataformat
			case "processors":
				dataformat, err := extractDefaultDataFormat(path)
				if err != nil {
					log.Printf("Getting default data-format for %s.%s failed: %v", category, name, err)
				}
				defaultParser = dataformat
				// The execd processor requires both a parser and serializer
				if name == "execd" {
					defaultSerializer = dataformat
				}
			case "outputs":
				dataformat, err := extractDefaultDataFormat(path)
				if err != nil {
					log.Printf("Getting default data-format for %s.%s failed: %v", category, name, err)
				}
				defaultSerializer = dataformat
			}

			// Record one entry per registered plugin name; the path and
			// build tag are derived from the package directory name.
			for _, plugin := range registeredNames {
				path := filepath.Join("plugins", category, element.Name())
				tag := category + "." + element.Name()
				entries = append(entries, packageInfo{
					Category:          category,
					Plugin:            plugin,
					Path:              filepath.ToSlash(path),
					Tag:               tag,
					DefaultParser:     defaultParser,
					DefaultSerializer: defaultSerializer,
				})
			}
		}
	}
	p.packages[category] = entries
	return nil
}
// CollectAvailable scans the plugin source tree for all known categories
// and fills the package collection.
func (p *packageCollection) CollectAvailable() error {
	p.packages = make(map[string][]packageInfo, len(categories))
	for _, c := range categories {
		if err := p.collectPackagesForCategory(c); err != nil {
			return err
		}
	}
	return nil
}
// ExtractTags returns the sorted list of build tags of all packages in
// the collection. (The previous version ranged over keys and values but
// discarded the key via "_ = category"; ranging over values only removes
// that workaround.)
func (p *packageCollection) ExtractTags() []string {
	var tags []string
	for _, pkgs := range p.packages {
		for _, pkg := range pkgs {
			tags = append(tags, pkg.Tag)
		}
	}
	sort.Strings(tags)
	return tags
}
// Print writes a formatted overview of all enabled plugins, grouped by
// category and sorted by plugin name, to standard output.
func (p *packageCollection) Print() {
	const divider = "-------------------------------------------------------------------------------"
	fmt.Println(divider)
	fmt.Println("Enabled plugins:")
	fmt.Println(divider)
	for _, category := range categories {
		pkgs := p.packages[category]
		sort.Slice(pkgs, func(i, j int) bool { return pkgs[i].Plugin < pkgs[j].Plugin })
		fmt.Printf("%s (%d):\n", category, len(pkgs))
		for _, pkg := range pkgs {
			fmt.Printf(" %-30s %s\n", pkg.Plugin, pkg.Path)
		}
		fmt.Println(divider)
	}
}
func sourceFileFilter(d fs.FileInfo) bool {
return strings.HasSuffix(d.Name(), ".go") && !strings.HasSuffix(d.Name(), "_test.go")
}
func findFunctionDecl(file *ast.File, name string) *ast.FuncDecl {
for _, decl := range file.Decls {
d, ok := decl.(*ast.FuncDecl)
if !ok {
continue
}
if d.Name.Name == name && d.Recv == nil {
return d
}
}
return nil
}
func findAddStatements(decl *ast.FuncDecl, pluginType string) []*ast.CallExpr {
var statements []*ast.CallExpr
for _, stmt := range decl.Body.List {
s, ok := stmt.(*ast.ExprStmt)
if !ok {
continue
}
call, ok := s.X.(*ast.CallExpr)
if !ok {
continue
}
fun, ok := call.Fun.(*ast.SelectorExpr)
if !ok {
continue
}
e, ok := fun.X.(*ast.Ident)
if !ok {
continue
}
if e.Name == pluginType && (fun.Sel.Name == "Add" || fun.Sel.Name == "AddStreaming") {
statements = append(statements, call)
}
}
return statements
}
// extractPluginInfo returns the plugin names registered in the file's
// init() function via <pluginType>.Add()/.AddStreaming() calls. Names
// given as identifiers are resolved through the declarations map. It
// returns nil (without error) if the file has no init function or no
// registration calls.
func extractPluginInfo(file *ast.File, pluginType string, declarations map[string]string) ([]string, error) {
	var registeredNames []string

	decl := findFunctionDecl(file, "init")
	if decl == nil {
		return nil, nil
	}
	calls := findAddStatements(decl, pluginType)
	if len(calls) == 0 {
		return nil, nil
	}

	for _, call := range calls {
		// The first argument of the Add call is the registered name,
		// either a string literal or a reference to a string variable.
		switch arg := call.Args[0].(type) {
		case *ast.Ident:
			resval, found := declarations[arg.Name]
			if !found {
				return nil, fmt.Errorf("cannot resolve registered name variable %q", arg.Name)
			}
			registeredNames = append(registeredNames, strings.Trim(resval, "\""))
		case *ast.BasicLit:
			if arg.Kind != token.STRING {
				return nil, errors.New("registered name is not a string")
			}
			registeredNames = append(registeredNames, strings.Trim(arg.Value, "\""))
		default:
			return nil, fmt.Errorf("unhandled argument type: %v (%T)", arg, arg)
		}
	}

	return registeredNames, nil
}
// extractPackageDeclarations collects all package-level identifiers that
// are declared with a single string-literal value, across all files of
// the package. The result maps the identifier name to its unquoted value
// and is used to resolve plugin names registered via variable references.
//
//nolint:staticcheck // Use deprecated ast.Package for now
func extractPackageDeclarations(pkg *ast.Package) map[string]string {
	declarations := make(map[string]string)
	for _, file := range pkg.Files {
		for _, d := range file.Decls {
			gendecl, ok := d.(*ast.GenDecl)
			if !ok {
				continue
			}
			for _, spec := range gendecl.Specs {
				spec, ok := spec.(*ast.ValueSpec)
				if !ok {
					continue
				}
				for _, id := range spec.Names {
					// Only simple "name = <string literal>" declarations
					// are considered; everything else is skipped.
					valspec, ok := id.Obj.Decl.(*ast.ValueSpec)
					if !ok || len(valspec.Values) != 1 {
						continue
					}
					valdecl, ok := valspec.Values[0].(*ast.BasicLit)
					if !ok || valdecl.Kind != token.STRING {
						continue
					}
					declarations[id.Name] = strings.Trim(valdecl.Value, "\"")
				}
			}
		}
	}
	return declarations
}
// extractRegisteredNames returns all plugin names the package registers
// for the given plugin type. Names referenced via variables are resolved
// using the package-level string declarations; extraction errors of
// individual files are logged and the file is skipped.
//
//nolint:staticcheck // Use deprecated ast.Package for now
func extractRegisteredNames(pkg *ast.Package, pluginType string) []string {
	var registeredNames []string

	// Extract all declared variables of all files. This might be necessary when
	// using references across multiple files
	declarations := extractPackageDeclarations(pkg)

	// Find the registry Add statement and extract all registered names
	for fn, file := range pkg.Files {
		names, err := extractPluginInfo(file, pluginType, declarations)
		if err != nil {
			log.Printf("%q error: %v", fn, err)
			continue
		}
		registeredNames = append(registeredNames, names...)
	}
	return registeredNames
}
func extractDefaultDataFormat(pluginDir string) (string, error) {
re := regexp.MustCompile(`^\s*#?\s*data_format\s*=\s*"(.*)"\s*$`)
// Exception for exec input which uses JSON by default
if filepath.ToSlash(pluginDir) == "plugins/inputs/exec" {
return "json", nil
}
// Walk all config files in the package directory
elements, err := os.ReadDir(pluginDir)
if err != nil {
return "", err
}
for _, element := range elements {
path := filepath.Join(pluginDir, element.Name())
if element.IsDir() || filepath.Ext(element.Name()) != ".conf" {
continue
}
// Read the config and search for a "data_format" entry
file, err := os.Open(path)
if err != nil {
return "", err
}
scanner := bufio.NewScanner(file)
for scanner.Scan() {
match := re.FindStringSubmatch(scanner.Text())
if len(match) == 2 {
return match[1], nil
}
}
}
return "", nil
}

View file

@ -0,0 +1,5 @@
inputs.disk
inputs.mem
inputs.swap
inputs.system
outputs.datadog

View file

@ -0,0 +1,65 @@
## Telegraf Configuration for ThinClients
## /etc/telegraf/telegraf.conf
[global_tags]
service_name = "thinclient"
env = "prod"
team = "planetexpress"
## Configuration for telegraf agent
[agent]
## Data input and output settings
interval = "10s"
round_interval = true
metric_batch_size = 1000
metric_buffer_limit = 10000
collection_jitter = "0s"
flush_interval = "10s"
flush_jitter = "5s"
## Logging configuration
debug = false
quiet = false
# empty string means log to stderr
logfile = ""
## host configuration
# if empty use os.hostname()
hostname = ""
omit_hostname = false
# Configuration for sending metrics to Datadog
[[outputs.datadog]]
## Datadog API key
apikey = "${datadog_secret}"
## Connection timeout.
timeout = "5s"
## Write URL override; useful for debugging.
url = "${datadog_url}"
## Metrics to log
[[inputs.system]]
name_prefix = "dg.systemengineering.thinclient."
# default configuration; getting uptime values.
[[inputs.mem]]
name_prefix = "dg.systemengineering.thinclient."
# no configuration
[[inputs.disk]]
name_prefix = "dg.systemengineering.thinclient."
## By default stats will be gathered for all mount points.
## Set mount_points will restrict the stats to only the specified mount points.
mount_points = ["/"]
[[inputs.swap]]
name_prefix = "dg.systemengineering.thinclient."
## Monitoring SWAP (zswap) usage
## Ignore mount points by filesystem type.
#ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"]

View file

@ -0,0 +1,4 @@
inputs.mqtt_consumer
outputs.influxdb_v2
parsers.json_v2
parsers.value

View file

@ -0,0 +1,39 @@
[[inputs.mqtt_consumer]]
name_override = "qr_mqtt_message"
servers = ["tcp://mosquitto:1883"]
topics = [
"<REDACTED>"
]
qos = 2
persistent_session = false
client_id = "telegraf_qr_code"
data_format = "json_v2"
[[inputs.mqtt_consumer.json_v2]]
[[inputs.mqtt_consumer.json_v2.object]]
path = "message.data"
tags = ["data"]
[[inputs.mqtt_consumer]]
name_override = "raw_mqtt_message"
servers = ["tcp://mosquitto:1883"]
# Capture the content as a string since we do not know the format of it...
data_format = "value"
data_type = "string"
# Capture all topics and store the topic as a tag with name "topic"...
topics = ["#"]
topic_tag = "topic"
qos = 2
persistent_session = false
client_id = "telegraf_generic"
[[outputs.influxdb_v2]]
urls = ["http://influxdb:8086"]
token = "${INFLUX_TOKEN}"
organization = "test"
bucket = "test_bucket"

View file

@ -0,0 +1,93 @@
# Dependency license verification tool
This tool allows the verification of information in
`docs/LICENSE_OF_DEPENDENCIES.md` against the linked license
information. To do so, the license reported by the user is
checked against the license classification of the downloaded
license file for each dependency.
## Building
```shell
make build_tools
```
## Running
The simplest way to run the verification tool is to execute
```shell
telegraf$ ./tools/license_checker/license_checker
```
This uses the current directory as telegraf's root directory and verifies
all licenses. Only errors will be reported by default.
There are multiple options you can use to customize the verification.
Take a look at
```shell
telegraf$ ./tools/license_checker/license_checker --help
```
to get an overview.
As the verification tool downloads each license file linked in the
dependency license document, you should be careful not to exceed
the access limits of e.g. GitHub by running the tool too frequently.
Some packages change the license for newer versions. As we always
link to the latest license text the classification might not match
the actual license of our used dependency. Furthermore, some license
text might be wrongly classified, or not classified at all. In these
cases, you can use a _whitelist_ to explicitly state the license
SPDX classifier for those packages.
See the [whitelist section](#whitelist) for more details.
The recommended use in telegraf is to run
```shell
telegraf$ ./tools/license_checker/license_checker \
-whitelist ./tools/license_checker/data/whitelist
```
using the code-versioned whitelist. This command will report all
non-matching entries with an `ERR:` prefix.
## Whitelist
Whitelist entries contain explicit license information for
a set of packages to use instead of classification. Each entry
in the whitelist is a line of the form
```text
[comparison operator]<package name>[@vX.Y.Z] <license SPDX>
```
where the _comparison operator_ is one of `>`, `>=`, `=`, `<=` or `<`
and the _license SPDX_ is a [SPDX license identifier][spdx].
In case no package version is specified, the entry matches all versions
of the library. Furthermore, the comparison operator can be omitted
which is equivalent to an exact match (`=`).
The entries are processed in order until the first match is found.
Here is an example of a whitelist. Assume that you have library
`github.com/foo/bar` which started out with the `MIT` license
until version 1.0.0 where it changed to `EFL-1.0` until it again
changed to `EFL-2.0` starting __after__ version 2.3.0. In this case
the whitelist should look like this
```text
<github.com/foo/bar@v1.0.0 MIT
<=github.com/foo/bar@v2.3.0 EFL-1.0
github.com/foo/bar EFL-2.0
```
All versions below 1.0.0 are matched by the first line and are thus
classified as `MIT`. The second line matches everything that is
above 1.0.0 (thus not matched by the first line) until (and including)
2.3.0. The last line will catch everything that passed the first
two lines, i.e. everything after 2.3.0.
[spdx]: https://spdx.org/licenses/

View file

@ -0,0 +1,15 @@
{
"Apache License 2.0": "Apache-2.0",
"BSD 2-Clause with views sentence": "BSD-2-Clause-Views",
"BSD 2-Clause \"Simplified\" License": "BSD-2-Clause",
"BSD 3-Clause \"New\" or \"Revised\" License": "BSD-3-Clause",
"BSD 3-Clause Clear License": "BSD-3-Clause",
"BSD 3-Clause License": "BSD-3-Clause",
"Eclipse Public License - v 1.0": "EPL-1.0",
"Eclipse Public License - v 2.0": "EPL-2.0",
"ISC License": "ISC",
"MIT License": "MIT",
"Mozilla Public License 2.0": "MPL-2.0",
"The Unlicense": "Unlicense",
"zlib License": "Zlib"
}

View file

@ -0,0 +1,2 @@
<github.com/couchbase/goutils@v0.1.2 Apache-2.0
<=github.com/segmentio/asm@v1.2.0 MIT

View file

@ -0,0 +1,245 @@
package main
import (
_ "embed"
"encoding/json"
"flag"
"fmt"
"log"
"os"
"path/filepath"
"sort"
"strings"
"github.com/yuin/goldmark"
"github.com/yuin/goldmark/ast"
"github.com/yuin/goldmark/text"
"golang.org/x/mod/modfile"
)
//go:embed data/spdx_mapping.json
var spdxMappingFile []byte // embedded JSON mapping of full license names to SPDX identifiers

// debug enables the diagnostic output produced by debugf.
var debug bool

// nameToSPDX maps human-readable license names to SPDX identifiers;
// it is populated from spdxMappingFile at startup in main.
var nameToSPDX map[string]string
// debugf logs a printf-style message with a "DEBUG: " prefix, but only
// when the package-level debug flag is enabled.
func debugf(format string, args ...any) {
	if debug {
		log.Printf("DEBUG: "+format, args...)
	}
}
// main checks the licenses of all Go dependencies listed in
// docs/LICENSE_OF_DEPENDENCIES.md. Each entry is either matched against
// a user-supplied whitelist or classified by downloading the license
// text and comparing it to the declared SPDX identifier. The process
// exits with code 1 when any license check fails.
func main() {
	var help, verbose bool
	var threshold float64
	var whitelistFn, userpkg string
	flag.BoolVar(&debug, "debug", false, "output debugging information")
	flag.BoolVar(&help, "help", false, "output this help text")
	flag.BoolVar(&verbose, "verbose", false, "output verbose information instead of just errors")
	flag.Float64Var(&threshold, "threshold", 0.8, "threshold for license classification")
	flag.StringVar(&whitelistFn, "whitelist", "", "use the given white-list file for comparison")
	flag.StringVar(&userpkg, "package", "", "only test the given package (all by default)")
	flag.Parse()

	if help || flag.NArg() > 1 {
		fmt.Fprintf(flag.CommandLine.Output(), "Usage of %s [options] [telegraf root dir]\n", os.Args[0])
		fmt.Fprintf(flag.CommandLine.Output(), "Options:\n")
		flag.PrintDefaults()
		fmt.Fprintf(flag.CommandLine.Output(), "\n")
		fmt.Fprintf(flag.CommandLine.Output(), "Arguments:\n")
		fmt.Fprintf(flag.CommandLine.Output(), " telegraf root dir (optional)\n")
		fmt.Fprintf(flag.CommandLine.Output(), " path to the root directory of telegraf (default: .)\n")
		os.Exit(1)
	}

	// Setup full-name to license SPDX identifier mapping
	if err := json.Unmarshal(spdxMappingFile, &nameToSPDX); err != nil {
		log.Fatalf("Unmarshalling license name to SPDX mapping failed: %v", err)
	}

	// Get required files
	path := "."
	if flag.NArg() == 1 {
		path = flag.Arg(0)
	}
	moduleFilename := filepath.Join(path, "go.mod")
	licenseFilename := filepath.Join(path, "docs", "LICENSE_OF_DEPENDENCIES.md")

	var override whitelist
	if whitelistFn != "" {
		log.Printf("Reading whitelist file %q...", whitelistFn)
		if err := override.Parse(whitelistFn); err != nil {
			log.Fatalf("Reading whitelist failed: %v", err)
		}
	}

	log.Printf("Reading module file %q...", moduleFilename)
	modbuf, err := os.ReadFile(moduleFilename)
	if err != nil {
		log.Fatal(err)
	}
	depModules, err := modfile.Parse(moduleFilename, modbuf, nil)
	if err != nil {
		// Fixed format verb: "%f" is a float verb and rendered the
		// error as "%!f(...)" noise; "%v" prints the error message.
		log.Fatalf("Parsing modules failed: %v", err)
	}
	debugf("found %d required packages", len(depModules.Require))

	// Map module path -> version for enriching the parsed entries
	dependencies := make(map[string]string)
	for _, d := range depModules.Require {
		dependencies[d.Mod.Path] = d.Mod.Version
	}

	log.Printf("Reading license file %q...", licenseFilename)
	licensesMarkdown, err := os.ReadFile(licenseFilename)
	if err != nil {
		log.Fatal(err)
	}

	// Parse the markdown document
	parser := goldmark.DefaultParser()
	root := parser.Parse(text.NewReader(licensesMarkdown))

	// Prepare a line parser
	lineParser := goldmark.DefaultParser()

	// Collect the licenses
	// For each list we search for the items and parse them.
	// Expect a pattern of <package name> <link>.
	ignored := 0
	var packageInfos []packageInfo
	for node := root.FirstChild(); node != nil; node = node.NextSibling() {
		listNode, ok := node.(*ast.List)
		if !ok {
			continue
		}
		for inode := listNode.FirstChild(); inode != nil; inode = inode.NextSibling() {
			itemNode, ok := inode.(*ast.ListItem)
			if !ok || itemNode.ChildCount() != 1 {
				continue
			}
			textNode, ok := itemNode.FirstChild().(*ast.TextBlock)
			if !ok || textNode.Lines().Len() != 1 {
				continue
			}
			lineSegment := textNode.Lines().At(0)
			line := lineSegment.Value(licensesMarkdown)

			// Re-parse the single line to split the plain package name
			// from the license link.
			lineRoot := lineParser.Parse(text.NewReader(line))
			if lineRoot.ChildCount() != 1 || lineRoot.FirstChild().ChildCount() < 2 {
				log.Printf("WARN: Ignoring item %q due to wrong count (%d/%d)", string(line), lineRoot.ChildCount(), lineRoot.FirstChild().ChildCount())
				ignored++
				continue
			}
			var name, license, link string
			for lineElementNode := lineRoot.FirstChild().FirstChild(); lineElementNode != nil; lineElementNode = lineElementNode.NextSibling() {
				switch v := lineElementNode.(type) {
				case *ast.Text:
					name += string(v.Value(line))
				case *ast.Link:
					license = string(v.FirstChild().(*ast.Text).Value(line))
					link = string(v.Destination)
				default:
					debugf("ignoring unknown element %T (%v)", v, v)
				}
			}
			name = strings.TrimSpace(name)
			info := packageInfo{
				name:    name,
				version: dependencies[name],
				url:     strings.TrimSpace(link),
				license: strings.TrimSpace(license),
			}
			info.ToSPDX()
			if info.name == "" {
				log.Printf("WARN: Ignoring item %q due to empty package name", string(line))
				ignored++
				continue
			}
			if info.url == "" {
				log.Printf("WARN: Ignoring item %q due to empty url name", string(line))
				ignored++
				continue
			}
			if info.license == "" {
				log.Printf("WARN: Ignoring item %q due to empty license name", string(line))
				ignored++
				continue
			}
			debugf("adding %q with license %q (%s) and version %q at %q...", info.name, info.license, info.spdx, info.version, info.url)
			packageInfos = append(packageInfos, info)
		}
	}

	// Get the superset of licenses
	if debug {
		licenseSet := make(map[string]bool, len(packageInfos))
		licenseNames := make([]string, 0, len(packageInfos))
		for _, info := range packageInfos {
			if found := licenseSet[info.license]; !found {
				licenseNames = append(licenseNames, info.license)
			}
			licenseSet[info.license] = true
		}
		sort.Strings(licenseNames)
		log.Println("Using licenses:")
		for _, license := range licenseNames {
			log.Println(" " + license)
		}
	}

	// Check the licenses by matching their text and compare the classification result
	// with the information provided by the user
	var succeeded, warn, failed int
	for _, info := range packageInfos {
		// Ignore all packages except the ones given by the user (if any)
		if userpkg != "" && userpkg != info.name {
			continue
		}

		// Check if we got a whitelist entry for the package
		if ok, found := override.Check(info.name, info.version, info.spdx); found {
			if ok {
				log.Printf("OK: \"%s@%s\" (%s) (whitelist)", info.name, info.version, info.license)
				succeeded++
			} else {
				log.Printf("ERR: \"%s@%s\" (%s) %s does not match whitelist", info.name, info.version, info.license, info.spdx)
				failed++
			}
			continue
		}

		// Perform a text classification
		confidence, err := info.Classify()
		if err != nil {
			log.Printf("ERR: %q (%s) %v", info.name, info.license, err)
			failed++
			continue
		}
		if confidence < threshold {
			log.Printf("WARN: %q (%s) has low matching confidence (%.2f%%)", info.name, info.license, confidence)
			warn++
			continue
		}
		if verbose {
			log.Printf("OK: %q (%s) (%.2f%%)", info.name, info.license, confidence)
		}
		succeeded++
	}
	if verbose {
		log.Printf("Checked %d licenses (%d ignored lines):", len(packageInfos), ignored)
		log.Printf(" %d successful", succeeded)
		log.Printf(" %d low confidence", warn)
		log.Printf(" %d errors", failed)
	}

	if failed > 0 {
		os.Exit(1)
	}
	os.Exit(0)
}

View file

@ -0,0 +1,102 @@
package main
import (
"errors"
"fmt"
"io"
"net/http"
"net/url"
"strings"
"github.com/google/licensecheck"
)
// packageInfo describes one dependency entry parsed from the license
// markdown document, enriched with the version found in go.mod.
type packageInfo struct {
	name    string // module path, e.g. "github.com/foo/bar"
	version string // module version from go.mod (may be empty)
	license string // human-readable license name from the markdown
	url     string // link to the license text
	spdx    string // SPDX identifier resolved via nameToSPDX
}
// ToSPDX resolves the human-readable license name to its SPDX
// identifier via the package-level nameToSPDX mapping; an unknown
// name leaves spdx empty.
func (pkg *packageInfo) ToSPDX() {
	pkg.spdx = nameToSPDX[pkg.license]
}
// Classify downloads the license text referenced by the package's URL
// and matches it against the expected SPDX identifier using
// licensecheck. It returns the match confidence in percent and an
// error when the download fails, the body is empty, no license
// matches, or the best match disagrees with pkg.spdx.
func (pkg *packageInfo) Classify() (float64, error) {
	// Check for a valid SPDX
	if pkg.spdx == "" {
		return 0.0, fmt.Errorf("empty SPDX for license %q", pkg.license)
	}

	// Download the license text
	source, err := normalizeURL(pkg.url)
	if err != nil {
		return 0.0, fmt.Errorf("%q is not a valid URL: %w", pkg.url, err)
	}
	debugf("%q downloading from %q", pkg.name, source)
	response, err := http.Get(source.String())
	if err != nil {
		return 0.0, fmt.Errorf("download from %q failed: %w", source, err)
	}
	// Close the body on every path. The original deferred the close
	// only after the status check, leaking the body (and connection)
	// for non-2xx responses.
	defer response.Body.Close()
	if response.StatusCode < 200 || response.StatusCode > 299 {
		status := response.StatusCode
		return 0.0, fmt.Errorf("download from %q failed %d: %s", source, status, http.StatusText(status))
	}
	text, err := io.ReadAll(response.Body)
	if err != nil {
		return 0.0, fmt.Errorf("reading body failed: %w", err)
	}
	if len(text) < 1 {
		return 0.0, errors.New("empty body")
	}

	// Classify the license text and compare the best match against the
	// expected identifier.
	coverage := licensecheck.Scan(text)
	if len(coverage.Match) == 0 {
		return coverage.Percent, errors.New("no match found")
	}
	match := coverage.Match[0]
	debugf("%q found match: %q with confidence %f%%", pkg.name, match.ID, coverage.Percent)
	if match.ID != pkg.spdx {
		return coverage.Percent, fmt.Errorf("classification %q does not match", match.ID)
	}
	return coverage.Percent, nil
}
// normalizeURL rewrites a repository "blob" viewer URL into one that
// serves the raw file content for the known hosting providers; URLs of
// other hosts are returned unchanged.
func normalizeURL(raw string) (*url.URL, error) {
	u, err := url.Parse(raw)
	if err != nil {
		return nil, err
	}

	switch u.Hostname() {
	case "github.com":
		// Switch to the raw-content host and drop every "blob" path element.
		u.Host = "raw.githubusercontent.com"
		elements := strings.Split(u.Path, "/")
		kept := elements[:0]
		for _, element := range elements {
			if element != "blob" {
				kept = append(kept, element)
			}
		}
		u.Path = strings.Join(kept, "/")
	case "gitlab.com":
		u.Path = strings.Replace(u.Path, "/-/blob/", "/-/raw/", 1)
	case "git.octo.it":
		// The file is selected via semicolon-separated query parameters.
		params := strings.Split(u.RawQuery, ";")
		for i, param := range params {
			if param == "a=blob" {
				params[i] = "a=blob_plain"
				break
			}
		}
		u.RawQuery = strings.Join(params, ";")
	}

	return u, nil
}

View file

@ -0,0 +1,119 @@
package main
import (
"bufio"
"log"
"os"
"regexp"
"strings"
"github.com/coreos/go-semver/semver"
)
// whitelist is an ordered list of override entries; the first entry
// matching a package/version decides the outcome of Check.
type whitelist []whitelistEntry

// whitelistEntry is one parsed line of a whitelist file of the form
// "[operator]<package name>[@vX.Y.Z] <license SPDX>".
type whitelistEntry struct {
	Name     string          // module path the entry applies to
	Version  *semver.Version // version bound; nil matches all versions
	Operator string          // one of "", "=", "<", "<=", ">", ">="
	License  string          // expected SPDX identifier
}

// re matches one whitelist line: an optional comparison operator, the
// package name, an optional "@vX.Y.Z" version and the license SPDX id.
var re = regexp.MustCompile(`^([<=>]+\s*)?([-\.\/\w]+)(@v[\d\.]+)?\s+([-\.\w]+)$`)
// Parse reads the given whitelist file and appends its entries to w.
// Each non-comment line has the form
//
//	[operator]<package name>[@vX.Y.Z] <license SPDX>
//
// Blank lines and lines starting with '#' are skipped silently;
// malformed lines are logged and ignored. A version given without an
// operator implies exact matching ("=").
func (w *whitelist) Parse(filename string) error {
	file, err := os.Open(filename)
	if err != nil {
		return err
	}
	defer file.Close()

	// Read the file line-by-line and match each entry with the regular expression
	lineno := 0
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		lineno++
		if line == "" || strings.HasPrefix(line, "#") {
			// Skip blank lines and comments without a warning; the
			// original logged a spurious WARN for every blank line.
			continue
		}

		groups := re.FindAllStringSubmatch(line, -1)
		if len(groups) != 1 {
			log.Printf("WARN: Ignoring not matching entry in line %d", lineno)
			continue
		}
		group := groups[0]
		if len(group) != 5 {
			// Malformed
			log.Printf("WARN: Ignoring malformed entry in line %d", lineno)
			continue
		}

		// An entry has the form:
		//   [operator]<package name>[@version] [license SPDX]
		var operator, version string
		if group[1] != "" {
			operator = strings.TrimSpace(group[1])
		}
		name := group[2]
		if group[3] != "" {
			version = strings.TrimSpace(group[3])
			version = strings.TrimLeft(version, "@v")
		}
		license := strings.TrimSpace(group[4])

		entry := whitelistEntry{Name: name, License: license, Operator: operator}
		if version != "" {
			entry.Version, err = semver.NewVersion(version)
			if err != nil {
				// Malformed
				log.Printf("Ignoring malformed version in line %d: %v", lineno, err)
				continue
			}
			// A version without an explicit operator means exact match
			if entry.Operator == "" {
				entry.Operator = "="
			}
		}
		*w = append(*w, entry)
	}

	return scanner.Err()
}
// Check reports whether the given package/version is covered by the
// whitelist. The first entry whose name and version constraint match
// decides: found is true when any entry matched, and ok is true when
// that entry's license equals spdx. Packages without a version cannot
// be evaluated and report (false, false).
func (w *whitelist) Check(pkg, version, spdx string) (ok, found bool) {
	v := strings.TrimSpace(version)
	v = strings.TrimPrefix(v, "v")
	if v == "" {
		return false, false
	}
	pkgver := *semver.New(v)

	for _, entry := range *w {
		if entry.Name != pkg {
			continue
		}
		// Guard against entries that carry a comparison operator but
		// no version: dereferencing the nil Version below would panic.
		// Treat such (malformed) entries as non-matching.
		if entry.Operator != "" && entry.Version == nil {
			continue
		}
		var match bool
		switch entry.Operator {
		case "":
			match = true
		case "=":
			match = pkgver.Equal(*entry.Version)
		case "<":
			match = pkgver.LessThan(*entry.Version)
		case "<=":
			match = pkgver.LessThan(*entry.Version) || pkgver.Equal(*entry.Version)
		case ">":
			match = !pkgver.LessThan(*entry.Version) && !pkgver.Equal(*entry.Version)
		case ">=":
			match = !pkgver.LessThan(*entry.Version)
		}
		if match {
			return entry.License == spdx, true
		}
	}
	return false, false
}

View file

@ -0,0 +1,24 @@
# Test Package Files with LXD
Used to test the RPM and DEB packages using LXD across a variety of
distributions.
The image will add the InfluxData repo, install Telegraf, and ensure the
service is running. Then the new package is installed, and the test
verifies that the service is still running.
Any issues or errors will cause the test to fail.
## CLI
To test an RPM or DEB with a specific image:
```sh
./package-test-lxd --package telegraf_1.21.4-1_amd64.deb --image debian/bullseye
```
To test an RPM or a DEB with a whole set of images:
```sh
./package-test-lxd --package telegraf_1.21.4-1_amd64.deb
```

View file

@ -0,0 +1,361 @@
package main
import (
"errors"
"fmt"
"math"
"path/filepath"
"time"
)
// influxDataRPMRepo is the repository definition written into the
// yum/dnf/zypper config pointing at the stable InfluxData repository.
const influxDataRPMRepo = `[influxdata]
name = InfluxData Repository - Stable
baseurl = https://repos.influxdata.com/stable/x86_64/main
enabled = 1
gpgcheck = 1
gpgkey = https://repos.influxdata.com/influxdata-archive_compat.key
`

// Container wraps one incus instance used for a single package test run.
type Container struct {
	Name string // instance name

	client         IncusClient // connection to the incus daemon
	packageManager string      // detected manager: "apt", "yum", "dnf" or "zypper"
}
// create container with given name and image
func (c *Container) Create(image string) error {
if c.Name == "" {
return errors.New("unable to create container: no name given")
}
c.client = IncusClient{}
err := c.client.Connect()
if err != nil {
return fmt.Errorf("failed to connect to incus: %w", err)
}
err = c.client.Create(c.Name, "images", image)
if err != nil {
return fmt.Errorf("failed to create instance: %w", err)
}
// at this point the container is created, so on any error during setup
// we want to delete it as well
err = c.client.Start(c.Name)
if err != nil {
c.Delete()
return fmt.Errorf("failed to start instance: %w", err)
}
if err := c.detectPackageManager(); err != nil {
c.Delete()
return err
}
if err := c.waitForNetwork(); err != nil {
c.Delete()
return err
}
if err := c.setupRepo(); err != nil {
c.Delete()
return err
}
return nil
}
// delete the container
func (c *Container) Delete() {
//nolint:errcheck // cleaning up state so no need to check for error
c.client.Stop(c.Name)
//nolint:errcheck // cleaning up state so no need to check for error
c.client.Delete(c.Name)
}
// installs the package from configured repos
func (c *Container) Install(packageName ...string) error {
var cmd []string
switch c.packageManager {
case "apt":
cmd = append([]string{"apt-get", "install", "--yes"}, packageName...)
case "yum":
cmd = append([]string{"yum", "install", "-y"}, packageName...)
case "dnf":
cmd = append([]string{"dnf", "install", "-y"}, packageName...)
case "zypper":
cmd = append([]string{"zypper", "--gpg-auto-import-keys", "install", "-y"}, packageName...)
}
err := c.client.Exec(c.Name, cmd...)
if err != nil {
return err
}
return nil
}
// CheckStatus pushes a minimal valid telegraf configuration into the
// container, starts the given systemd service and verifies it reports
// a healthy status. On failure the service status and journal are
// dumped to the test output for diagnosis.
func (c *Container) CheckStatus(serviceName string) error {
	// push a valid config first, then start the service
	err := c.client.Exec(
		c.Name,
		"bash",
		"-c",
		"--",
		"echo '[[inputs.cpu]]\n[[outputs.file]]' | "+
			"tee /etc/telegraf/telegraf.conf",
	)
	if err != nil {
		return err
	}
	// list the config directory so the test log shows what was written
	err = c.client.Exec(
		c.Name,
		"bash",
		"-c",
		"--",
		"ls -la /etc/telegraf/",
	)
	if err != nil {
		return err
	}

	err = c.client.Exec(c.Name, "systemctl", "start", serviceName)
	if err != nil {
		//nolint:errcheck // cleaning up state so no need to check for error
		c.client.Exec(c.Name, "systemctl", "status", serviceName)
		//nolint:errcheck // cleaning up state so no need to check for error
		c.client.Exec(c.Name, "journalctl", "--no-pager", "--unit", serviceName)
		return err
	}
	// a second status check catches services that started but died right away
	err = c.client.Exec(c.Name, "systemctl", "status", serviceName)
	if err != nil {
		//nolint:errcheck // cleaning up state so no need to check for error
		c.client.Exec(c.Name, "journalctl", "--no-pager", "--unit", serviceName)
		return err
	}

	return nil
}
// UploadAndInstall pushes a local package file into the container's
// /root directory and installs it from there.
func (c *Container) UploadAndInstall(filename string) error {
	destination := "/root/" + filepath.Base(filename)
	if err := c.client.Push(c.Name, filename, destination); err != nil {
		return err
	}
	return c.Install(destination)
}
// Push key and config and update
func (c *Container) configureApt() error {
err := c.client.Exec(c.Name, "apt-get", "update")
if err != nil {
return err
}
err = c.Install("ca-certificates", "gpg", "wget")
if err != nil {
return err
}
err = c.client.Exec(c.Name, "wget", "https://repos.influxdata.com/influxdata-archive_compat.key")
if err != nil {
return err
}
err = c.client.Exec(
c.Name,
"bash",
"-c",
"--",
"echo '393e8779c89ac8d958f81f942f9ad7fb82a25e133faddaf92e15b16e6ac9ce4c influxdata-archive_compat.key' | "+
"sha256sum -c && cat influxdata-archive_compat.key | gpg --dearmor | "+
"sudo tee /etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg > /dev/null",
)
if err != nil {
return err
}
err = c.client.Exec(
c.Name,
"bash",
"-c",
"--",
"echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg] https://repos.influxdata.com/debian stable main' | "+
"tee /etc/apt/sources.list.d/influxdata.list",
)
if err != nil {
return err
}
err = c.client.Exec(
c.Name,
"bash", "-c", "--",
"cat /etc/apt/sources.list.d/influxdata.list",
)
if err != nil {
return err
}
err = c.client.Exec(c.Name, "apt-get", "update")
if err != nil {
return err
}
return nil
}
// Create config and update yum
func (c *Container) configureYum() error {
err := c.client.Exec(
c.Name,
"bash", "-c", "--",
fmt.Sprintf("echo -e %q > /etc/yum.repos.d/influxdata.repo", influxDataRPMRepo),
)
if err != nil {
return err
}
err = c.client.Exec(
c.Name,
"bash", "-c", "--",
"cat /etc/yum.repos.d/influxdata.repo",
)
if err != nil {
return err
}
// will return a non-zero return code if there are packages to update
return c.client.Exec(c.Name, "bash", "-c", "yum check-update || true")
}
// Create config and update dnf
func (c *Container) configureDnf() error {
err := c.client.Exec(
c.Name,
"bash", "-c", "--",
fmt.Sprintf("echo -e %q > /etc/yum.repos.d/influxdata.repo", influxDataRPMRepo),
)
if err != nil {
return err
}
err = c.client.Exec(
c.Name,
"bash", "-c", "--",
"cat /etc/yum.repos.d/influxdata.repo",
)
if err != nil {
return err
}
// will return a non-zero return code if there are packages to update
return c.client.Exec(c.Name, "bash", "-c", "dnf check-update || true")
}
// Create config and update zypper
func (c *Container) configureZypper() error {
err := c.client.Exec(
c.Name,
"bash", "-c", "--",
fmt.Sprintf("echo -e %q > /etc/zypp/repos.d/influxdata.repo", influxDataRPMRepo),
)
if err != nil {
return err
}
err = c.client.Exec(
c.Name,
"bash", "-c", "--",
"cat /etc/zypp/repos.d/influxdata.repo",
)
if err != nil {
return err
}
return c.client.Exec(c.Name, "zypper", "--no-gpg-checks", "refresh")
}
// Determine if the system uses yum or apt for software
func (c *Container) detectPackageManager() error {
// Different options required across the distros as apt returns -1 when
// run with no options. yum is listed last to prefer the newer
// options first.
err := c.client.Exec(c.Name, "which", "apt")
if err == nil {
c.packageManager = "apt"
return nil
}
err = c.client.Exec(c.Name, "dnf", "--version")
if err == nil {
c.packageManager = "dnf"
return nil
}
err = c.client.Exec(c.Name, "yum", "version")
if err == nil {
c.packageManager = "yum"
return nil
}
err = c.client.Exec(c.Name, "which", "zypper")
if err == nil {
c.packageManager = "zypper"
return nil
}
return errors.New("unable to determine package manager")
}
// Configure the system with InfluxData repo
func (c *Container) setupRepo() error {
if c.packageManager == "apt" {
if err := c.configureApt(); err != nil {
return err
}
} else if c.packageManager == "yum" {
if err := c.configureYum(); err != nil {
return err
}
} else if c.packageManager == "zypper" {
if err := c.configureZypper(); err != nil {
return err
}
} else if c.packageManager == "dnf" {
if err := c.configureDnf(); err != nil {
return err
}
}
return nil
}
// Wait for the network to come up on a container
func (c *Container) waitForNetwork() error {
var exponentialBackoffCeilingSecs int64 = 128
attempts := 0
for {
if err := c.client.Exec(c.Name, "getent", "hosts", "influxdata.com"); err == nil {
return nil
}
// uses exponetnial backoff to try after 1, 2, 4, 8, 16, etc. seconds
delaySecs := int64(math.Pow(2, float64(attempts)))
if delaySecs > exponentialBackoffCeilingSecs {
break
}
fmt.Printf("waiting for network, sleeping for %d second(s)\n", delaySecs)
time.Sleep(time.Duration(delaySecs) * time.Second)
attempts++
}
return errors.New("timeout reached waiting for network on container")
}

View file

@ -0,0 +1,195 @@
package main
import (
"bytes"
"errors"
"fmt"
"os"
"strconv"
"strings"
incus "github.com/lxc/incus/v6/client"
"github.com/lxc/incus/v6/shared/api"
)
var (
	timeout     = 120 // seconds before commands run via Exec are terminated
	killTimeout = 5   // grace period in seconds before the command is killed
)

// BytesBuffer adds a no-op Close to bytes.Buffer so it satisfies the
// WriteCloser interface expected for exec output capture.
type BytesBuffer struct {
	*bytes.Buffer
}

// Close implements io.Closer; there is nothing to release.
func (*BytesBuffer) Close() error {
	return nil
}

// IncusClient is a thin convenience wrapper around the incus
// InstanceServer API.
type IncusClient struct {
	Client incus.InstanceServer
}
// Connect to the LXD socket.
func (c *IncusClient) Connect() error {
client, err := incus.ConnectIncusUnix("", nil)
if err != nil {
return err
}
c.Client = client
return nil
}
// Create a container using a specific remote and alias.
func (c *IncusClient) Create(name, remote, alias string) error {
fmt.Printf("creating %s with %s:%s\n", name, remote, alias)
if c.Client == nil {
err := c.Connect()
if err != nil {
return err
}
}
server := ""
switch remote {
case "images":
server = "https://images.linuxcontainers.org"
case "ubuntu":
server = "https://cloud-images.ubuntu.com/releases"
case "ubuntu-daily":
server = "https://cloud-images.ubuntu.com/daily"
default:
return fmt.Errorf("unknown remote: %s", remote)
}
req := api.InstancesPost{
Name: name,
Source: api.InstanceSource{
Type: "image",
Mode: "pull",
Protocol: "simplestreams",
Server: server,
Alias: alias,
},
}
// Get LXD to create the container (background operation)
op, err := c.Client.CreateInstance(req)
if err != nil {
return err
}
// Wait for the operation to complete
err = op.Wait()
if err != nil {
return err
}
return nil
}
// Delete the given container.
func (c *IncusClient) Delete(name string) error {
fmt.Println("deleting", name)
op, err := c.Client.DeleteInstance(name)
if err != nil {
return err
}
return op.Wait()
}
// Run a command returning result struct. Will kill commands that take longer
// than 120 seconds.
// Exec runs a command inside the named container, capturing combined
// stdout/stderr. Commands are wrapped in /usr/bin/timeout so they are
// terminated after `timeout` seconds (killed killTimeout seconds
// later). A non-zero exit status is returned as an error carrying the
// captured output.
func (c *IncusClient) Exec(name string, command ...string) error {
	fmt.Printf("$ %s\n", strings.Join(command, " "))

	cmd := []string{"/usr/bin/timeout", "-k", strconv.Itoa(killTimeout), strconv.Itoa(timeout)}
	cmd = append(cmd, command...)
	req := api.InstanceExecPost{
		Command:   cmd,
		WaitForWS: true,
	}

	output := &BytesBuffer{bytes.NewBuffer(nil)}
	args := incus.InstanceExecArgs{
		Stdout:   output,
		Stderr:   output,
		DataDone: make(chan bool),
	}

	op, err := c.Client.ExecInstance(name, req, &args)
	if err != nil {
		return err
	}
	if err := op.Wait(); err != nil {
		return err
	}

	// Wait for any remaining I/O to be flushed
	<-args.DataDone

	// Get the return code; guard the assertion so missing or malformed
	// metadata yields an error instead of a panic.
	opAPI := op.Get()
	code, ok := opAPI.Metadata["return"].(float64)
	if !ok {
		return fmt.Errorf("unexpected return metadata %v: %s", opAPI.Metadata["return"], output.String())
	}
	if int(code) != 0 {
		return errors.New(output.String())
	}

	fmt.Println(output.String())
	return nil
}
// Push file to container.
func (c *IncusClient) Push(name, src, dst string) error {
fmt.Printf("cp %s %s%s\n", src, name, dst)
f, err := os.Open(src)
if err != nil {
return fmt.Errorf("error reading %s: %w", src, err)
}
defer f.Close()
return c.Client.CreateInstanceFile(name, dst, incus.InstanceFileArgs{
Content: f,
Mode: 0644,
})
}
// Start the given container.
func (c *IncusClient) Start(name string) error {
fmt.Println("starting", name)
reqState := api.InstanceStatePut{
Action: "start",
Timeout: -1,
}
op, err := c.Client.UpdateInstanceState(name, reqState, "")
if err != nil {
return err
}
return op.Wait()
}
// Stop the given container.
func (c *IncusClient) Stop(name string) error {
fmt.Println("stopping", name)
reqState := api.InstanceStatePut{
Action: "stop",
Force: true,
Timeout: 10,
}
op, err := c.Client.UpdateInstanceState(name, reqState, "")
if err != nil {
return err
}
return op.Wait()
}

View file

@ -0,0 +1,116 @@
package main
import (
"fmt"
"os"
"path/filepath"
"github.com/google/uuid"
"github.com/urfave/cli/v2"
)
// imagesRPM lists the container images used to test .rpm packages.
var imagesRPM = []string{
	"fedora/41",
	"fedora/40",
	"centos/9-Stream",
}

// imagesDEB lists the container images used to test .deb packages.
var imagesDEB = []string{
	"debian/bullseye",
	"debian/bookworm",
	"ubuntu/noble",
	"ubuntu/jammy",
}
// main parses the CLI flags and runs the package upgrade test either
// against a single user-supplied image or against the full image
// matrix matching the package type (.rpm or .deb).
func main() {
	packageFile := ""
	image := ""
	app := &cli.App{
		Flags: []cli.Flag{
			&cli.StringFlag{
				Name:        "package",
				Usage:       ".deb or .rpm file for upgrade testing",
				Destination: &packageFile,
				Required:    true,
			},
			&cli.StringFlag{
				Name:        "image",
				Usage:       "optional, run with specific image",
				Destination: &image,
			},
		},
		Action: func(*cli.Context) error {
			// the package file must exist locally before any container work
			if _, err := os.Stat(packageFile); err != nil {
				return fmt.Errorf("unknown package file: %w", err)
			}

			if image != "" && packageFile != "" {
				fmt.Printf("test package %q on image %q\n", packageFile, image)
				return launchTests(packageFile, []string{image})
			} else if packageFile != "" {
				fmt.Printf("test package %q on all applicable images\n", packageFile)
				// pick the image matrix based on the package extension
				extension := filepath.Ext(packageFile)
				switch extension {
				case ".rpm":
					return launchTests(packageFile, imagesRPM)
				case ".deb":
					return launchTests(packageFile, imagesDEB)
				default:
					return fmt.Errorf("%s has unknown package type: %s", packageFile, extension)
				}
			}

			return nil
		},
	}

	err := app.Run(os.Args)
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
}
// launchTests runs the package test once per image, stopping at the
// first failure. Each run gets a container with a unique
// "telegraf-test-XXXXXXXX" name derived from a fresh UUID.
func launchTests(packageFile string, images []string) error {
	for _, image := range images {
		fmt.Printf("starting test with %s\n", image)
		containerName := "telegraf-test-" + uuid.New().String()[:8]
		if err := runTest(image, containerName, packageFile); err != nil {
			fmt.Printf("*** FAIL: %s\n", image)
			return err
		}
		fmt.Printf("*** PASS: %s\n\n", image)
	}

	fmt.Println("*** ALL TESTS PASS ***")
	return nil
}
// runTest provisions a container from the image, installs telegraf from
// the configured repository, verifies the service, upgrades to the
// local package file and verifies the service again. The container is
// always cleaned up via defer once it was created.
func runTest(image, name, packageFile string) error {
	c := Container{Name: name}
	if err := c.Create(image); err != nil {
		return err
	}
	defer c.Delete()

	steps := []func() error{
		func() error { return c.Install("telegraf") },
		func() error { return c.CheckStatus("telegraf") },
		func() error { return c.UploadAndInstall(packageFile) },
	}
	for _, step := range steps {
		if err := step(); err != nil {
			return err
		}
	}
	return c.CheckStatus("telegraf")
}

View file

@ -0,0 +1,250 @@
// This is a tool to embed configuration files into the README.md of all plugins
// It searches for TOML sections in the plugins' README.md and detects includes specified in the form
//
// ```toml [@includeA.conf[ @includeB[ @...]]
// Whatever is in here gets replaced.
// ```
//
// Then it will replace everything in this section by the concatenation of the file `includeA.conf`, `includeB` etc.
// content. The tool is not stateful, so it can be run multiple time with a stable result as long
// as the included files do not change.
package main
import (
"bytes"
"fmt"
"io"
"log"
"os"
"path/filepath"
"regexp"
"strings"
"github.com/yuin/goldmark"
"github.com/yuin/goldmark/ast"
"github.com/yuin/goldmark/text"
)
var (
	// Finds all comment section parts `<-- @includefile -->`
	commentIncludesEx = regexp.MustCompile(`<!--\s+(@.+)+\s+-->`)
	// Finds all TOML sections of the form `toml @includefile`
	tomlIncludesEx = regexp.MustCompile(`[\s"]+(@.+)+"?`)
	// Extracts the `includefile` part
	includeMatch = regexp.MustCompile(`(?:@([^\s"]+))+`)
)

// includeBlock records one region of the README whose content is to be
// replaced by the concatenation of the referenced include files.
type includeBlock struct {
	Includes []string // resolved absolute paths of the files to insert
	Start    int      // byte offset in the README where the region starts
	Stop     int      // byte offset in the README where the region ends
	Newlines bool     // surround the inserted content with extra newlines
}
// extractIncludeBlock parses an include specification (e.g.
// `toml @a.conf @b.conf` or `<!-- @a.conf -->`) from txt using the
// given regular expression and resolves the referenced files. Includes
// starting with "/" are resolved relative to the include root; entries
// that cannot be resolved or are not regular files are skipped with a
// log message. Returns nil when txt carries no include specification.
func extractIncludeBlock(txt []byte, includesEx *regexp.Regexp, root string) *includeBlock {
	includes := includesEx.FindSubmatch(txt)
	if len(includes) != 2 {
		return nil
	}
	block := includeBlock{}
	for _, inc := range includeMatch.FindAllSubmatch(includes[1], -1) {
		if len(inc) != 2 {
			continue
		}
		include := string(inc[1])
		// Make absolute paths relative to the include-root if any
		if strings.HasPrefix(include, "/") {
			if root == "" {
				log.Printf("Ignoring absolute include %q without include root...", include)
				continue
			}
			include = filepath.Join(root, include)
		}
		// Relative paths are resolved against the current working directory
		include, err := filepath.Abs(include)
		if err != nil {
			log.Printf("Cannot resolve include %q...", include)
			continue
		}
		if fi, err := os.Stat(include); err != nil || !fi.Mode().IsRegular() {
			log.Printf("Ignoring include %q as it cannot be found or is not a regular file...", include)
			continue
		}
		block.Includes = append(block.Includes, include)
	}
	return &block
}
func insertInclude(buf *bytes.Buffer, include string) error {
file, err := os.Open(include)
if err != nil {
return fmt.Errorf("opening include %q failed: %w", include, err)
}
defer file.Close()
// Write the include and make sure we get a newline
if _, err := io.Copy(buf, file); err != nil {
return fmt.Errorf("inserting include %q failed: %w", include, err)
}
return nil
}
func insertIncludes(buf *bytes.Buffer, b *includeBlock) error {
// Insert newlines before and after
if b.Newlines {
buf.WriteByte('\n')
}
// Insert all includes in the order they occurred
for i, include := range b.Includes {
if i > 0 {
// Add a separating newline between included blocks
buf.WriteByte('\n')
}
if err := insertInclude(buf, include); err != nil {
return err
}
}
// Make sure we add a trailing newline
if !bytes.HasSuffix(buf.Bytes(), []byte("\n")) || b.Newlines {
buf.WriteByte('\n')
}
return nil
}
// main reads README.md from the current directory, locates all TOML
// code fences and headings carrying `@include` directives, replaces
// their content with the referenced files and writes the result back
// using the README's original file permissions.
func main() {
	// Estimate Telegraf root to be able to handle absolute paths
	cwd, err := os.Getwd()
	if err != nil {
		log.Fatalf("Cannot get working directory: %v", err)
	}
	cwd, err = filepath.Abs(cwd)
	if err != nil {
		log.Fatalf("Cannot resolve working directory: %v", err)
	}

	var includeRoot string
	if idx := strings.LastIndex(cwd, filepath.FromSlash("/plugins/")); idx > 0 {
		includeRoot = cwd[:idx]
	}

	// Get the file permission of the README for later use
	inputFilename := "README.md"
	inputFileInfo, err := os.Lstat(inputFilename)
	if err != nil {
		log.Fatalf("Cannot get file permissions: %v", err)
	}
	perm := inputFileInfo.Mode().Perm()

	// Read and parse the README markdown file
	readme, err := os.ReadFile(inputFilename)
	if err != nil {
		log.Fatalf("Reading README failed: %v", err)
	}
	parser := goldmark.DefaultParser()
	root := parser.Parse(text.NewReader(readme))

	// Walk the markdown to identify the (TOML) parts to replace
	blocksToReplace := make([]*includeBlock, 0)
	for rawnode := root.FirstChild(); rawnode != nil; rawnode = rawnode.NextSibling() {
		// Only match TOML code nodes
		var txt []byte
		var start, stop int
		var newlines bool
		var re *regexp.Regexp
		switch node := rawnode.(type) {
		case *ast.FencedCodeBlock:
			if string(node.Language(readme)) != "toml" {
				// Ignore any other node type or language
				continue
			}
			// Extract the block borders
			start = node.Info.Segment.Stop + 1
			stop = start
			lines := node.Lines()
			if lines.Len() > 0 {
				stop = lines.At(lines.Len() - 1).Stop
			}
			txt = node.Info.Value(readme)
			re = tomlIncludesEx
		case *ast.Heading:
			// Headings carry includes as a trailing HTML comment; the
			// replaced region extends up to the next heading of the
			// same or a higher level.
			if node.ChildCount() < 2 {
				continue
			}
			child, ok := node.LastChild().(*ast.RawHTML)
			if !ok || child.Segments.Len() == 0 {
				continue
			}
			segment := child.Segments.At(0)
			if !commentIncludesEx.Match(segment.Value(readme)) {
				continue
			}
			start = segment.Stop + 1
			stop = len(readme) // necessary for cases with no more headings
			for rawnode = rawnode.NextSibling(); rawnode != nil; rawnode = rawnode.NextSibling() {
				if h, ok := rawnode.(*ast.Heading); ok && h.Level <= node.Level {
					if rawnode.Lines().Len() > 0 {
						stop = rawnode.Lines().At(0).Start - h.Level - 1
					} else {
						//nolint:staticcheck // need to use this since we aren't sure the type
						log.Printf("heading without lines: %s", string(rawnode.Text(readme)))
						stop = start // safety measure to prevent removing all text
					}
					// Make sure we also iterate the present heading
					rawnode = h.PreviousSibling()
					break
				}
			}
			txt = segment.Value(readme)
			re = commentIncludesEx
			newlines = true
		default:
			// Ignore everything else
			continue
		}

		// Extract the includes from the node
		block := extractIncludeBlock(txt, re, includeRoot)
		if block != nil {
			block.Start = start
			block.Stop = stop
			block.Newlines = newlines
			blocksToReplace = append(blocksToReplace, block)
		}

		// Catch the case of heading-end-search exhausted all nodes
		if rawnode == nil {
			break
		}
	}

	// Replace the content of the TOML blocks with includes
	var output bytes.Buffer
	output.Grow(len(readme))
	offset := 0
	for _, b := range blocksToReplace {
		// Copy everything up to the beginning of the block we want to replace and make sure we get a newline
		output.Write(readme[offset:b.Start])
		if !bytes.HasSuffix(output.Bytes(), []byte("\n")) {
			output.WriteString("\n")
		}
		offset = b.Stop
		// Insert the include file
		if err := insertIncludes(&output, b); err != nil {
			log.Fatal(err)
		}
	}
	// Copy the remaining of the original file...
	output.Write(readme[offset:])

	// Write output with same permission as input
	file, err := os.OpenFile(inputFilename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, perm)
	if err != nil {
		log.Fatalf("Opening output file failed: %v", err)
	}
	defer file.Close()
	if _, err := output.WriteTo(file); err != nil {
		log.Panicf("Writing output file failed: %v", err)
	}
}

View file

@ -0,0 +1,29 @@
# README.md linter
## Building
```shell
telegraf/tools/readme_linter$ go build .
```
## Running
Run readme_linter with the filenames of the readme files you want to lint.
```shell
telegraf/tools/readme_linter$ ./readme_linter <path to readme>
```
You can lint multiple filenames at once. This works well with shell globs.
To lint all the plugin readmes:
```shell
telegraf/tools/readme_linter$ ./readme_linter ../../plugins/*/*/README.md
```
To lint readmes for inputs starting a-d:
```shell
telegraf/tools/readme_linter$ ./readme_linter ../../plugins/inputs/[a-d]*/README.md
```

View file

@ -0,0 +1,158 @@
package main
import (
"fmt"
"regexp"
"runtime"
"sort"
"github.com/yuin/goldmark/ast"
)
// T carries the state for linting one README file and is the receiver
// of all linter assert methods. A file passes when fails stays zero.
type T struct {
	filename       string // path of the README being linted, used in messages
	markdown       []byte // raw file contents, needed to resolve goldmark segments
	newlineOffsets []int  // offsets of every newline, for offset -> line translation
	sourceFlag     bool   // when true, also print the linter source location of failed asserts
	pluginType     plugin // category of the plugin this README documents
	fails          int    // number of failed assertions so far
}
// printFailedAssertf reports a failed assertion tied to a node: it
// prints the node's file location, the formatted message and (with the
// -source flag) the location of the linter code that asserted. It is
// called by all assert functions that involve a node; the hard-coded
// depth 3 passed to printRule assumes exactly this call chain, so do
// not add or remove stack frames here.
func (t *T) printFailedAssertf(n ast.Node, format string, args ...interface{}) {
	t.printFile(n)
	fmt.Printf(format+"\n", args...)
	t.printRule(3)
	t.fails++
}
// assertf records a failure that doesn't involve a node, for example
// when a required section is missing entirely.
func (t *T) assertf(format string, args ...interface{}) {
	t.assertLine2f(0, format, args...) // There's no line number associated, so use the first
}
// assertNodef records a failure located at node n.
func (t *T) assertNodef(n ast.Node, format string, args ...interface{}) {
	t.printFailedAssertf(n, format, args...)
}
// assertNodeLineOffsetf records a failure located offset lines below
// the first line of node n. The body mirrors printFailedAssertf rather
// than calling it so that the stack depth seen by printRule(3) stays
// correct.
func (t *T) assertNodeLineOffsetf(n ast.Node, offset int, format string, args ...interface{}) {
	t.printFileOffset(n, offset)
	fmt.Printf(format+"\n", args...)
	t.printRule(3)
	t.fails++
}
// assertLinef records a failure at the given zero-based line.
func (t *T) assertLinef(line int, format string, args ...interface{}) {
	// this func only exists to make the call stack to t.printRule the same depth
	// as when called through assertf
	t.assertLine2f(line, format, args...)
}
// assertLine2f is the shared implementation behind assertf and
// assertLinef; printRule(3) assumes it is always reached through one
// of those wrappers, so do not call it from anywhere else.
func (t *T) assertLine2f(line int, format string, args ...interface{}) {
	t.printFileLine(line)
	fmt.Printf(format+"\n", args...)
	t.printRule(3)
	t.fails++
}
// printRule prints the source location of the linter assertion that
// failed so rule authors can find the check quickly. It is a no-op
// unless the -source flag was given. callers is the number of stack
// frames between this function and the original assert call site.
func (t *T) printRule(callers int) {
	if !t.sourceFlag {
		return
	}

	pc, codeFilename, codeLine, ok := runtime.Caller(callers)
	if !ok {
		panic("can not get caller")
	}

	fmt.Printf("%s:%d: ", codeFilename, codeLine)

	funcName := ""
	if f := runtime.FuncForPC(pc); f != nil {
		funcName = f.Name()
	}
	if funcName == "" {
		fmt.Printf("failed assert\n")
	} else {
		fmt.Printf("failed assert in function %s\n", funcName)
	}
}
// line translates a byte offset into the markdown into a zero-based
// line number by binary-searching the recorded newline offsets.
func (t *T) line(offset int) int {
	return sort.SearchInts(t.newlineOffsets, offset)
}
// printFile prints the "file:line: " prefix for the first line of node n.
func (t *T) printFile(n ast.Node) {
	t.printFileOffset(n, 0)
}
// printFileOffset prints the "file:line: " prefix for the line that is
// offset lines below the start of node n. Nodes without line
// information are attributed to the first line of the file.
func (t *T) printFileOffset(n ast.Node, offset int) {
	lines := n.Lines()
	if lines == nil || lines.Len() == 0 {
		t.printFileLine(0)
		return
	}
	line := t.line(lines.At(0).Start)
	t.printFileLine(line + offset)
}
// printFileLine prints the "file:line: " message prefix; line is
// zero-based internally but printed one-based, as editors expect.
func (t *T) printFileLine(line int) {
	fmt.Printf("%s:%d: ", t.filename, line+1) // Lines start with 1
}
// printPassFail prints a one-line pass/fail summary for the file,
// including the number of failed assertions when there were any.
func (t *T) printPassFail() {
	if t.fails > 0 {
		fmt.Printf("Fail %s, %d failed assertions\n", t.filename, t.fails)
		return
	}
	fmt.Printf("Pass %s\n", t.filename)
}
// assertKind checks that node n is of the expected goldmark node kind
// and records a failure otherwise.
func (t *T) assertKind(expected ast.NodeKind, n ast.Node) {
	if n.Kind() == expected {
		return
	}

	t.printFailedAssertf(n, "expected %s, have %s", expected.String(), n.Kind().String())
}
// assertFirstChildRegexp checks that the text of n's first child
// matches expectedPattern. The pattern must be a valid regular
// expression; compilation panics otherwise (MustCompile).
func (t *T) assertFirstChildRegexp(expectedPattern string, n ast.Node) {
	var validRegexp = regexp.MustCompile(expectedPattern)

	if !n.HasChildren() {
		t.printFailedAssertf(n, "expected children")
		return
	}
	c := n.FirstChild()

	//nolint:staticcheck // need to use this since we aren't sure the type
	actual := string(c.Text(t.markdown))

	if !validRegexp.MatchString(actual) {
		t.printFailedAssertf(n, "%q does not match regexp %q", actual, expectedPattern)
		return
	}
}
// assertHeadingLevel checks that n is a heading of the expected level.
// A node that is not a heading at all is counted as a failure with a
// plain message (no file location).
func (t *T) assertHeadingLevel(expected int, n ast.Node) {
	h, ok := n.(*ast.Heading)
	if !ok {
		fmt.Printf("failed Heading type assertion\n")
		t.fails++
		return
	}

	if h.Level == expected {
		return
	}

	t.printFailedAssertf(n, "expected header level %d, have %d", expected, h.Level)
}
// pass reports whether the file passed, i.e. no assertion failed.
func (t *T) pass() bool {
	return t.fails == 0
}

149
tools/readme_linter/main.go Normal file
View file

@ -0,0 +1,149 @@
package main
import (
"bufio"
"bytes"
"flag"
"os"
"github.com/yuin/goldmark"
"github.com/yuin/goldmark/ast"
"github.com/yuin/goldmark/extension"
"github.com/yuin/goldmark/parser"
"github.com/yuin/goldmark/text"
"github.com/yuin/goldmark/util"
)
// main lints every README passed on the command line and exits with a
// non-zero status when any file fails.
func main() {
	sourceFlag := flag.Bool("source", false, "include location of linter code that failed assertion")
	quiet := flag.Bool("quiet", false, "only print failed assertion but no pass information")
	flag.Parse()

	allPass := true
	for _, filename := range flag.Args() {
		filePass, err := checkFile(filename, guessPluginType(filename), *sourceFlag, *quiet)
		if err != nil {
			panic(err)
		}
		allPass = allPass && filePass
	}

	if !allPass {
		os.Exit(1)
	}
}
// ruleFunc is a single linter rule run against the root of the parsed
// markdown; it records failures on t and returns an error only for
// operational problems.
type ruleFunc func(*T, ast.Node) error

// rulesMap maps each plugin category to the rules its README must satisfy.
type rulesMap map[plugin][]ruleFunc

// rules is populated once in init and only read afterwards.
var rules rulesMap
// init registers the common rules for every plugin category and the
// category-specific required-section rules.
func init() {
	rules = make(rulesMap)

	// Rules for all plugin types
	all := []ruleFunc{
		firstSection,
		metadata,
		configSection,
		relativeTelegrafLinks,
		noLongLinesInParagraphs(80),
	}
	// pluginInput..pluginParser are consecutive iota values, so this
	// registers the common rules for every category. Note that all
	// categories initially share the same slice; the appends below only
	// stay independent because the literal's capacity equals its length,
	// forcing append to reallocate.
	for i := pluginInput; i <= pluginParser; i++ {
		rules[i] = all
	}

	// Rules for input plugins
	rules[pluginInput] = append(rules[pluginInput], []ruleFunc{
		requiredSectionsClose([]string{
			"Example Output",
			"Metrics",
			"Global configuration options",
		}),
	}...)

	// Rules for output plugins
	rules[pluginOutput] = append(rules[pluginOutput], []ruleFunc{
		requiredSectionsClose([]string{
			"Global configuration options",
		}),
	}...)

	// Rules for processor plugins
	rules[pluginProcessor] = append(rules[pluginProcessor], []ruleFunc{
		requiredSectionsClose([]string{
			"Global configuration options",
		}),
	}...)

	// Rules for aggregator plugins
	rules[pluginAggregator] = append(rules[pluginAggregator], []ruleFunc{
		requiredSectionsClose([]string{
			"Global configuration options",
		}),
	}...)
}
// checkFile lints a single README with the rules registered for
// pluginType. It returns whether the file passed and an error only for
// operational problems (e.g. the file cannot be read).
func checkFile(filename string, pluginType plugin, sourceFlag, quiet bool) (bool, error) {
	md, err := os.ReadFile(filename)
	if err != nil {
		return false, err
	}

	// Goldmark reports locations as byte offsets. We want line numbers,
	// so record the offset of every newline for later translation.
	// Scan bytes, not runes: goldmark segments are byte-based, and
	// counting runes would skew the recorded offsets in files that
	// contain multi-byte UTF-8 characters (such as the metadata icons).
	scanner := bufio.NewScanner(bytes.NewReader(md))
	scanner.Split(bufio.ScanBytes)
	offset := 0
	newlineOffsets := make([]int, 0)
	for scanner.Scan() {
		if scanner.Text() == "\n" {
			newlineOffsets = append(newlineOffsets, offset)
		}
		offset++
	}

	p := goldmark.DefaultParser()

	// We need goldmark to parse tables, otherwise they show up as
	// paragraphs. Since tables often have long lines and we check for long
	// lines in paragraphs, without table parsing there are false positive long
	// lines in tables.
	//
	// The tableParagraphTransformer is an extension and not part of the default
	// parser so we add it. There may be an easier way to do it, but this works:
	p.AddOptions(
		parser.WithParagraphTransformers(
			util.Prioritized(extension.NewTableParagraphTransformer(), 99),
		),
	)

	r := text.NewReader(md)
	root := p.Parse(r)

	tester := T{
		filename:       filename,
		markdown:       md,
		newlineOffsets: newlineOffsets,
		sourceFlag:     sourceFlag,
		pluginType:     pluginType,
	}
	for _, rule := range rules[pluginType] {
		if err := rule(&tester, root); err != nil {
			return false, err
		}
	}

	if !quiet {
		tester.printPassFail()
	}

	return tester.pass(), nil
}

View file

@ -0,0 +1,36 @@
package main
import (
"path/filepath"
)
// plugin enumerates the Telegraf plugin categories a README can belong
// to; pluginNone marks files that are not plugin readmes.
type plugin int

const (
	pluginNone plugin = iota
	pluginInput
	pluginOutput
	pluginProcessor
	pluginAggregator
	pluginParser
)

// guessPluginType derives the plugin category from the README's path.
// It looks only at the two directories directly above the file, so
// `plugins/inputs/amd_rocm_smi/README.md` maps to pluginInput while
// readmes nested deeper (e.g. in plugin test folders) map to
// pluginNone and are not linted as plugin readmes.
func guessPluginType(filename string) plugin {
	// Normalize to forward slashes so the comparison also works with
	// Windows-style paths, where filepath.Dir yields backslashes.
	switch filepath.ToSlash(filepath.Dir(filepath.Dir(filename))) {
	case "plugins/inputs":
		return pluginInput
	case "plugins/outputs":
		return pluginOutput
	case "plugins/processors":
		return pluginProcessor
	case "plugins/aggregators":
		return pluginAggregator
	case "plugins/parsers":
		return pluginParser
	default:
		return pluginNone
	}
}

View file

@ -0,0 +1,456 @@
package main
import (
"bufio"
"bytes"
"regexp"
"slices"
"strings"
"github.com/yuin/goldmark/ast"
)
var (
	// Setup regular expressions for checking versions and valid choices

	// metaComment matches HTML comments (e.g., <!-- some comment -->)
	// surrounded by optional whitespace; used to strip annotations from
	// tag and OS lists before validation.
	metaComment = regexp.MustCompile(`(?:\s*<!-- .* -->\s*)`)
	// metaVersion matches the Telegraf versioning format (e.g.,
	// "Telegraf v1.2.3"), optionally followed by an HTML comment.
	metaVersion = regexp.MustCompile(`^Telegraf v\d+\.\d+\.\d+(?:\s+<!-- .* -->\s*)?$`)

	// metaTags lists the valid metadata tags per plugin category.
	// Categories absent from this map (e.g. parsers) must not carry a
	// tags entry at all.
	metaTags = map[plugin][]string{
		pluginInput: {
			"applications",
			"cloud",
			"containers",
			"datastore",
			"hardware",
			"iot",
			"logging",
			"messaging",
			"network",
			"server",
			"system",
			"testing",
			"web",
		},
		pluginOutput: {
			"applications",
			"cloud",
			"containers",
			"datastore",
			"hardware",
			"iot",
			"logging",
			"messaging",
			"network",
			"server",
			"system",
			"testing",
			"web",
		},
		pluginAggregator: {
			"math",
			"sampling",
			"statistics",
			"transformation",
		},
		pluginProcessor: {
			"math",
			"sampling",
			"statistics",
			"transformation",
		},
	}

	// metaOSes lists the operating systems accepted in the 💻 entry.
	metaOSes = []string{
		"all",
		"freebsd",
		"linux",
		"macos",
		"solaris",
		"windows",
	}

	// metaOrder is the required order of metadata entries. It is a
	// template shared across files and must not be mutated.
	metaOrder = []string{
		"introduction version",
		"deprecation version",
		"removal version",
		"tags",
		"operating systems",
	}
)
// The first section is a level-1 heading with the plugin name followed
// by a paragraph with a short description. Records failures for a
// missing heading, wrong level, a title not ending in " Plugin", or a
// missing/too-short description.
func firstSection(t *T, root ast.Node) error {
	n := root.FirstChild()

	// Ignore HTML comments such as linter ignore sections
	for n != nil {
		if _, ok := n.(*ast.HTMLBlock); !ok {
			break
		}
		n = n.NextSibling()
	}
	// Guard against a document with no content at all; the asserts
	// below dereference the node.
	if n == nil {
		t.assertf("document has no content after leading HTML comments")
		return nil
	}

	t.assertKind(ast.KindHeading, n)
	t.assertHeadingLevel(1, n)
	t.assertFirstChildRegexp(` Plugin$`, n)

	// Make sure there is a description paragraph after the heading
	n = n.NextSibling()
	if n == nil {
		t.assertf("missing short description after plugin heading")
		return nil
	}
	t.assertKind(ast.KindParagraph, n)
	p, ok := n.(*ast.Paragraph)
	if !ok {
		// Failure already recorded by assertKind; avoid the panic the
		// unchecked type assertion would cause.
		return nil
	}
	length := len(p.Lines().Value(t.markdown))
	if length < 30 {
		t.assertNodef(n, "short first section. Please add short description of plugin. length %d, minimum 30", length)
	}

	return nil
}
// requiredSections checks that every title in headings appears in the
// document and, where present, is a second-level heading. Duplicate
// occurrences are tolerated; only missing titles and wrong levels are
// reported.
func requiredSections(t *T, root ast.Node, headings []string) error {
	headingsSet := newSet(headings)

	expectedLevel := 2

	titleCounts := make(map[string]int)
	for n := root.FirstChild(); n != nil; n = n.NextSibling() {
		var h *ast.Heading
		var ok bool
		if h, ok = n.(*ast.Heading); !ok {
			continue
		}

		// Skip headings without any child to take the title from
		child := h.FirstChild()
		if child == nil {
			continue
		}

		//nolint:staticcheck // need to use this since we aren't sure the type
		title := strings.TrimSpace(string(child.Text(t.markdown)))
		if headingsSet.has(title) && h.Level != expectedLevel {
			t.assertNodef(n, "has required section %q but wrong heading level. Expected level %d, found %d",
				title, expectedLevel, h.Level)
		}

		titleCounts[title]++
	}

	headingsSet.forEach(func(title string) {
		if _, exists := titleCounts[title]; !exists {
			t.assertf("missing required section %q", title)
		}
	})

	return nil
}
// requiredSectionsClose binds a list of required section headings into
// a ruleFunc (this is a closure over func requiredSections).
func requiredSectionsClose(headings []string) func(*T, ast.Node) error {
	return func(t *T, root ast.Node) error {
		return requiredSections(t, root, headings)
	}
}
// noLongLinesInParagraphs returns a rule that reports every line inside
// a paragraph that is longer than threshold bytes. Lines outside of
// paragraphs (code blocks, tables, headings) are not checked.
func noLongLinesInParagraphs(threshold int) func(*T, ast.Node) error {
	return func(t *T, root ast.Node) error {
		// We're looking for long lines in paragraphs. Find paragraphs
		// first, then which lines are in paragraphs
		paraLines := make([]int, 0)
		for n := root.FirstChild(); n != nil; n = n.NextSibling() {
			var p *ast.Paragraph
			var ok bool
			if p, ok = n.(*ast.Paragraph); !ok {
				continue // only looking for paragraphs
			}

			segs := p.Lines()
			for _, seg := range segs.Sliced(0, segs.Len()) {
				line := t.line(seg.Start)
				paraLines = append(paraLines, line)
			}
		}

		// Find long lines in the whole file
		longLines := make([]int, 0, len(t.newlineOffsets))
		// Track the offset of the newline preceding the current line.
		// Start at -1 so the first line of the file is measured in
		// full; starting at 0 would undercount it by one byte.
		last := -1
		for i, cur := range t.newlineOffsets {
			length := cur - last - 1 // exclude the newline from the length
			if length > threshold {
				longLines = append(longLines, i)
			}
			last = cur
		}

		// Merge both sorted lists: a line is reported only when it is
		// long AND belongs to a paragraph.
		p := 0
		l := 0
		bads := make([]int, 0, max(len(paraLines), len(longLines)))
		for p < len(paraLines) && l < len(longLines) {
			long := longLines[l]
			para := paraLines[p]
			switch {
			case long == para:
				bads = append(bads, long)
				p++
				l++
			case long < para:
				l++
			case long > para:
				p++
			}
		}

		for _, bad := range bads {
			t.assertLinef(bad, "long line in paragraph")
		}
		return nil
	}
}
// configSection checks that the document has a "Configuration" heading
// whose next sibling is a fenced code block in toml language. When the
// title appears more than once the last occurrence is the one checked
// (preserving the original behavior).
func configSection(t *T, root ast.Node) error {
	var config *ast.Heading
	expectedTitle := "Configuration"
	for n := root.FirstChild(); n != nil; n = n.NextSibling() {
		h, ok := n.(*ast.Heading)
		if !ok {
			continue
		}

		// Guard against empty headings, which have no child to take
		// the title from and would panic below.
		child := h.FirstChild()
		if child == nil {
			continue
		}

		//nolint:staticcheck // need to use this since we aren't sure the type
		title := string(child.Text(t.markdown))
		if title == expectedTitle {
			config = h
			continue
		}
	}

	if config == nil {
		t.assertf("missing required section %q", expectedTitle)
		return nil
	}

	toml := config.NextSibling()
	if toml == nil {
		// Use assertf here: assertNodef would dereference the nil node
		// when computing its file location.
		t.assertf("missing config next sibling")
		return nil
	}

	b, ok := toml.(*ast.FencedCodeBlock)
	if !ok {
		t.assertNodef(toml, "config next sibling isn't a fenced code block")
		return nil
	}
	if !bytes.Equal(b.Language(t.markdown), []byte("toml")) {
		t.assertNodef(b, "config fenced code block isn't toml language")
		return nil
	}
	return nil
}
// Links from one markdown file to another in the repo should be
// relative; absolute github.com/influxdata/telegraf blob URLs inside
// paragraphs are reported.
func relativeTelegrafLinks(t *T, root ast.Node) error {
	const repoPrefix = "https://github.com/influxdata/telegraf/blob"

	for para := root.FirstChild(); para != nil; para = para.NextSibling() {
		if _, ok := para.(*ast.Paragraph); !ok {
			continue
		}
		for child := para.FirstChild(); child != nil; child = child.NextSibling() {
			link, ok := child.(*ast.Link)
			if !ok {
				continue
			}
			dest := string(link.Destination)
			if strings.HasPrefix(dest, repoPrefix) {
				t.assertNodef(para, "in-repo link must be relative: %s", dest)
			}
		}
	}
	return nil
}
// Each plugin should have metadata for documentation generation: a
// paragraph of icon-prefixed lines (introduction/deprecation/removal
// version, tags, operating systems) between the plugin heading and the
// next heading. The entries must be valid, unique and in the order
// given by metaOrder.
func metadata(t *T, root ast.Node) error {
	const icons string = "⭐🚩🔥🏷️💻"

	n := root.FirstChild()
	if n == nil {
		t.assertf("no metadata section found")
		return nil
	}

	// Advance to the first heading which should be the plugin header
	for n != nil {
		if _, ok := n.(*ast.Heading); ok {
			t.assertHeadingLevel(1, n)
			break
		}
		n = n.NextSibling()
	}

	// Get the description text and check for metadata
	positions := make([]string, 0, 5)
	for n != nil {
		n = n.NextSibling()

		// The next heading will end the initial section
		if _, ok := n.(*ast.Heading); ok {
			break
		}

		// Ignore everything that is not text
		para, ok := n.(*ast.Paragraph)
		if !ok {
			continue
		}

		// Metadata should be separate paragraph with the items ordered.
		var inMetadata bool
		var counter int
		scanner := bufio.NewScanner(bytes.NewBuffer(para.Lines().Value(t.markdown)))
		for scanner.Scan() {
			txt := scanner.Text()
			// The first line of the paragraph decides whether the
			// whole paragraph is treated as metadata.
			if counter == 0 {
				inMetadata = strings.ContainsAny(txt, icons)
			}
			counter++

			// If we are not in a metadata section, we need to make sure we don't
			// see any metadata in this text.
			if !inMetadata {
				if strings.ContainsAny(txt, icons) {
					t.assertNodeLineOffsetf(n, counter-1, "metadata found in section not surrounded by empty lines")
					return nil
				}
				continue
			}

			icon, remainder, found := strings.Cut(txt, " ")
			if !found || !strings.Contains(icons, icon) {
				t.assertNodeLineOffsetf(n, counter-1, "metadata line must start with a valid icon and a space")
				continue
			}
			if strings.ContainsAny(remainder, icons) {
				t.assertNodeLineOffsetf(n, counter-1, "each metadata entry must be on a separate line")
				continue
			}

			// We are in a metadata section, so test for the correct structure
			switch icon {
			case "⭐":
				if !metaVersion.MatchString(remainder) {
					t.assertNodeLineOffsetf(n, counter-1, "invalid introduction version format; has to be 'Telegraf vX.Y.Z'")
				}
				positions = append(positions, "introduction version")
			case "🚩":
				if !metaVersion.MatchString(remainder) {
					t.assertNodeLineOffsetf(n, counter-1, "invalid deprecation version format; has to be 'Telegraf vX.Y.Z'")
				}
				positions = append(positions, "deprecation version")
			case "🔥":
				if !metaVersion.MatchString(remainder) {
					t.assertNodeLineOffsetf(n, counter-1, "invalid removal version format; has to be 'Telegraf vX.Y.Z'")
				}
				positions = append(positions, "removal version")
			case "🏷️":
				validTags, found := metaTags[t.pluginType]
				if !found {
					t.assertNodeLineOffsetf(n, counter-1, "no tags expected for plugin type")
					continue
				}
				for _, tag := range strings.Split(remainder, ",") {
					tag = metaComment.ReplaceAllString(tag, "")
					if !slices.Contains(validTags, strings.TrimSpace(tag)) {
						t.assertNodeLineOffsetf(n, counter-1, "unknown tag %q", tag)
					}
				}
				positions = append(positions, "tags")
			case "💻":
				for _, os := range strings.Split(remainder, ",") {
					os = metaComment.ReplaceAllString(os, "")
					if !slices.Contains(metaOSes, strings.TrimSpace(os)) {
						t.assertNodeLineOffsetf(n, counter-1, "unknown operating system %q", os)
					}
				}
				positions = append(positions, "operating systems")
			default:
				t.assertNodeLineOffsetf(n, counter-1, "invalid metadata icon")
				continue
			}
		}
	}

	if len(positions) == 0 {
		t.assertf("metadata is missing")
		return nil
	}

	// Check for duplicate entries
	seen := make(map[string]bool)
	for _, p := range positions {
		if seen[p] {
			t.assertNodef(n, "duplicate metadata entry for %q", p)
			return nil
		}
		seen[p] = true
	}

	// Remove the optional entries from the checklist. All removals must
	// operate on the local copy validOrder; deleting from the shared
	// package-level metaOrder template would corrupt the order check
	// for every subsequently linted file.
	validOrder := append(make([]string, 0, len(metaOrder)), metaOrder...)
	if !slices.Contains(positions, "deprecation version") && !slices.Contains(positions, "removal version") {
		idx := slices.Index(validOrder, "deprecation version")
		validOrder = slices.Delete(validOrder, idx, idx+1)
		idx = slices.Index(validOrder, "removal version")
		validOrder = slices.Delete(validOrder, idx, idx+1)
	}
	if _, found := metaTags[t.pluginType]; !found {
		idx := slices.Index(validOrder, "tags")
		validOrder = slices.Delete(validOrder, idx, idx+1)
	}

	// Check the order of the metadata entries and required entries
	if len(validOrder) != len(positions) {
		for _, v := range validOrder {
			if !slices.Contains(positions, v) {
				t.assertNodef(n, "metadata entry for %q is missing", v)
			}
		}
		return nil
	}
	for i, v := range validOrder {
		if v != positions[i] {
			if i == 0 {
				t.assertNodef(n, "%q has to be the first entry", v)
			} else {
				t.assertNodef(n, "%q has to follow %q", v, validOrder[i-1])
			}
			return nil
		}
	}

	return nil
}
// To do: Check markdown files that aren't plugin readme files for paragraphs
// with long lines
// To do: Check the toml inside the configuration section for syntax errors

View file

@ -0,0 +1,32 @@
package main
// set is a minimal string set backed by a map with empty-struct values.
type set struct {
	m map[string]struct{}
}

// add inserts key into the set; adding an existing key is a no-op.
func (s *set) add(key string) {
	s.m[key] = struct{}{}
}

// has reports whether key is a member of the set.
func (s *set) has(key string) bool {
	_, ok := s.m[key]
	return ok
}

// forEach calls f once per member, in unspecified order.
func (s *set) forEach(f func(string)) {
	for key := range s.m {
		f(key)
	}
}

// newSet builds a set containing every element of elems.
func newSet(elems []string) *set {
	s := &set{m: make(map[string]struct{}, len(elems))}
	for _, e := range elems {
		s.add(e)
	}
	return s
}

View file

@ -0,0 +1,12 @@
# Update Go Version
The version doesn't require a leading "v" and minor versions don't need
a trailing ".0". The tool will still work correctly if they are provided.
`go run tools/update_goversion/main.go 1.19.2`
`go run tools/update_goversion/main.go 1.19`
This tool is meant to be used to create a pull request that will update the
Telegraf project to use the latest version of Go.
The Dockerfile `quay.io/influxdb/telegraf-ci` used by the CI will have to be
pushed to the quay repository by a maintainer with `make ci`.

View file

@ -0,0 +1,216 @@
package main
import (
	"errors"
	"fmt"
	"io"
	"log"
	"net/http"
	"os"
	"regexp"
	"strings"
	"time"

	"github.com/coreos/go-semver/semver"
	"golang.org/x/net/html"
)
type FileInfo struct {
FileName string
Regex string
Replace string
}
func (f FileInfo) Update() error {
b, err := os.ReadFile(f.FileName)
if err != nil {
return err
}
re := regexp.MustCompile(f.Regex)
newContents := re.ReplaceAll(b, []byte(f.Replace))
err = os.WriteFile(f.FileName, newContents, 0640)
if err != nil {
return err
}
return nil
}
// removePatch cleans version from "1.20.1" to "1.20" (think go.mod entry).
// NOTE(review): semver.New is strict about the X.Y.Z format — confirm
// callers never pass a bare major.minor version such as "1.20".
func removePatch(version string) string {
	verInfo := semver.New(version)
	return fmt.Sprintf("%d.%d", verInfo.Major, verInfo.Minor)
}
// findHashes scans the HTML of the go.dev downloads page for the
// SHA256 hashes of the linux-amd64, darwin-arm64 and darwin-amd64
// tarballs of the given version. It returns a map from artifact file
// name to hash, or an error when the version or some hashes are not
// found.
func findHashes(body io.Reader, version string) (map[string]string, error) {
	htmlTokens := html.NewTokenizer(body)
	// The artifacts whose hashes we need to update in the repo
	artifacts := []string{
		fmt.Sprintf("go%s.linux-amd64.tar.gz", version),
		fmt.Sprintf("go%s.darwin-arm64.tar.gz", version),
		fmt.Sprintf("go%s.darwin-amd64.tar.gz", version),
	}

	// Tokenizer state: inside the download table, and the artifact whose
	// row is currently being read (empty when none matched).
	var insideDownloadTable bool
	var currentRow string
	hashes := make(map[string]string)
	for {
		tokenType := htmlTokens.Next()

		// if it's an error token, we either reached
		// the end of the file, or the HTML was malformed
		if tokenType == html.ErrorToken {
			err := htmlTokens.Err()
			if errors.Is(err, io.EOF) {
				// end of the file, break out of the loop
				break
			}
			return nil, htmlTokens.Err()
		}

		if tokenType == html.StartTagToken {
			// get the token
			token := htmlTokens.Token()
			// Entering <table class="downloadtable">
			if token.Data == "table" && len(token.Attr) == 1 && token.Attr[0].Val == "downloadtable" {
				insideDownloadTable = true
			}
			// An anchor inside the table carries the artifact file name
			// in its href
			if insideDownloadTable && token.Data == "a" && len(token.Attr) == 2 {
				for _, f := range artifacts {
					// Check if the current row matches a desired file
					if strings.Contains(token.Attr[1].Val, f) {
						currentRow = f
						break
					}
				}
			}
			// The <tt> element of a matched row holds the hash text
			if currentRow != "" && token.Data == "tt" {
				// the next token should be the hash text
				tokenType = htmlTokens.Next()
				// just make sure it's actually a text token
				if tokenType == html.TextToken {
					hashes[currentRow] = htmlTokens.Token().Data
					currentRow = ""
				}
			}
		}

		// Found a hash for each filename
		if len(hashes) == len(artifacts) {
			break
		}

		// Reached end of table without finding everything
		if tokenType == html.EndTagToken && htmlTokens.Token().Data == "table" {
			if len(hashes) == 0 {
				return nil, fmt.Errorf("could not find version %q on downloads page", version)
			}
			return nil, fmt.Errorf("only found %d hashes expected %d: %v", len(hashes), len(artifacts), hashes)
		}
	}

	return hashes, nil
}
// getHashes downloads the go.dev downloads page and extracts the
// SHA256 hashes for the artifacts of the given Go version.
func getHashes(version string) (map[string]string, error) {
	// Use an explicit timeout so a stalled download page cannot hang
	// the tool forever.
	client := &http.Client{Timeout: 30 * time.Second}
	resp, err := client.Get(`https://go.dev/dl/`)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	// A non-200 page would otherwise be parsed as HTML and produce a
	// confusing "version not found" error.
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("unexpected status %s from downloads page", resp.Status)
	}

	return findHashes(resp.Body, version)
}
// main bumps the Go version referenced throughout the repository to
// the version given as the first command-line argument, fetching the
// matching artifact hashes from go.dev.
func main() {
	// Guard against a missing argument; os.Args[1] would panic.
	if len(os.Args) < 2 {
		log.Fatal("usage: update_goversion <version>")
	}
	// Handle situation user accidentally provides version as "v1.19.2"
	version := strings.TrimPrefix(os.Args[1], "v")

	hashes, err := getHashes(version)
	if err != nil {
		log.Fatal(err)
	}

	// Print the hashes so the operator can eyeball them
	for file, hash := range hashes {
		fmt.Printf("%s %s\n", hash, file)
	}

	noPatchVersion := removePatch(version)

	files := []FileInfo{
		{
			FileName: ".circleci/config.yml",
			Regex:    `(quay\.io\/influxdb\/telegraf-ci):(\d.\d*.\d)`,
			Replace:  "$1:" + version,
		},
		{
			FileName: "go.mod",
			Regex:    `(go)\s(\d.\d*)`,
			Replace:  "$1 " + noPatchVersion,
		},
		{
			FileName: "Makefile",
			Regex:    `(quay\.io\/influxdb\/telegraf-ci):(\d.\d*.\d)`,
			Replace:  "$1:" + version,
		},
		{
			FileName: "README.md",
			Regex:    `(Telegraf requires Go version) (\d.\d*)`,
			Replace:  "$1 " + noPatchVersion,
		},
		{
			FileName: "scripts/ci.docker",
			Regex:    `(FROM golang):(\d.\d*.\d)`,
			Replace:  "$1:" + version,
		},
		{
			FileName: "scripts/installgo_linux.sh",
			Regex:    `(GO_VERSION)=("\d.\d*.\d")`,
			Replace:  fmt.Sprintf("$1=%q", version),
		},
		{
			FileName: "scripts/installgo_mac.sh",
			Regex:    `(GO_VERSION)=("\d.\d*.\d")`,
			Replace:  fmt.Sprintf("$1=%q", version),
		},
		{
			FileName: "scripts/installgo_windows.sh",
			Regex:    `(GO_VERSION)=("\d.\d*.\d")`,
			Replace:  fmt.Sprintf("$1=%q", version),
		},
		{
			FileName: "scripts/installgo_linux.sh",
			Regex:    `(GO_VERSION_SHA)=".*"`,
			Replace:  fmt.Sprintf("$1=%q", hashes[fmt.Sprintf("go%s.linux-amd64.tar.gz", version)]),
		},
		{
			FileName: "scripts/installgo_mac.sh",
			Regex:    `(GO_VERSION_SHA_arm64)=".*"`,
			Replace:  fmt.Sprintf("$1=%q", hashes[fmt.Sprintf("go%s.darwin-arm64.tar.gz", version)]),
		},
		{
			FileName: "scripts/installgo_mac.sh",
			Regex:    `(GO_VERSION_SHA_amd64)=".*"`,
			Replace:  fmt.Sprintf("$1=%q", hashes[fmt.Sprintf("go%s.darwin-amd64.tar.gz", version)]),
		},
		{
			FileName: ".github/workflows/readme-linter.yml",
			Regex:    `(go-version): '\d.\d*.\d'`,
			Replace:  fmt.Sprintf("$1: '%s'", version),
		},
	}

	for _, f := range files {
		fmt.Printf("Updating %s \n", f.FileName)
		err := f.Update()
		if err != nil {
			log.Panic(err)
		}
	}
}

View file

@ -0,0 +1,46 @@
package main
import (
"bytes"
"os"
"testing"
"github.com/stretchr/testify/require"
)
// TestFindHash checks hash extraction against saved copies of the
// go.dev downloads page: one patch release (1.19.2) and one minor
// release whose patch component is zero (1.19.0).
func TestFindHash(t *testing.T) {
	tests := []struct {
		testFile       string            // saved downloads page under testdata/
		version        string            // Go version to look up
		expectedHashes map[string]string // artifact file name -> SHA256
	}{
		{
			"testdata/godev_patch.html",
			"1.19.2",
			map[string]string{
				"go1.19.2.linux-amd64.tar.gz":  "5e8c5a74fe6470dd7e055a461acda8bb4050ead8c2df70f227e3ff7d8eb7eeb6",
				"go1.19.2.darwin-arm64.tar.gz": "35d819df25197c0be45f36ce849b994bba3b0559b76d4538b910d28f6395c00d",
				"go1.19.2.darwin-amd64.tar.gz": "16f8047d7b627699b3773680098fbaf7cc962b7db02b3e02726f78c4db26dfde",
			},
		},
		{
			"testdata/godev_minor.html",
			"1.19.0",
			map[string]string{
				"go1.19.0.linux-amd64.tar.gz":  "464b6b66591f6cf055bc5df90a9750bf5fbc9d038722bb84a9d56a2bea974be6",
				"go1.19.0.darwin-arm64.tar.gz": "859e0a54b7fcea89d9dd1ec52aab415ac8f169999e5fdfb0f0c15b577c4ead5e",
				"go1.19.0.darwin-amd64.tar.gz": "df6509885f65f0d7a4eaf3dfbe7dda327569787e8a0a31cbf99ae3a6e23e9ea8",
			},
		},
	}

	for _, test := range tests {
		b, err := os.ReadFile(test.testFile)
		require.NoError(t, err)

		hashes, err := findHashes(bytes.NewReader(b), test.version)
		require.NoError(t, err)

		require.Equal(t, test.expectedHashes, hashes)
	}
}

View file

@ -0,0 +1,192 @@
<table class="downloadtable">
<thead>
<tr class="first">
<th>File name</th>
<th>Kind</th>
<th>OS</th>
<th>Arch</th>
<th>Size</th>
<th>SHA256 Checksum</th>
</tr>
</thead>
<tr class="highlight">
<td class="filename"><a class="download" href="/web/20220802171248/https://go.dev/dl/go1.19.0.src.tar.gz">go1.19.0.src.tar.gz</a></td>
<td>Source</td>
<td></td>
<td></td>
<td>25MB</td>
<td><tt>9419cc70dc5a2523f29a77053cafff658ed21ef3561d9b6b020280ebceab28b9</tt></td>
</tr>
<tr>
<td class="filename"><a class="download" href="/web/20220802171248/https://go.dev/dl/go1.19.0.darwin-amd64.tar.gz">go1.19.0.darwin-amd64.tar.gz</a></td>
<td>Archive</td>
<td>macOS</td>
<td>x86-64</td>
<td>144MB</td>
<td><tt>df6509885f65f0d7a4eaf3dfbe7dda327569787e8a0a31cbf99ae3a6e23e9ea8</tt></td>
</tr>
<tr class="highlight">
<td class="filename"><a class="download" href="/web/20220802171248/https://go.dev/dl/go1.19.0.darwin-amd64.pkg">go1.19.0.darwin-amd64.pkg</a></td>
<td>Installer</td>
<td>macOS</td>
<td>x86-64</td>
<td>145MB</td>
<td><tt>61cb2268c4b3e6662a19c2dda58d43bfcf89d3649c205bcdb32c148e9048b1ba</tt></td>
</tr>
<tr>
<td class="filename"><a class="download" href="/web/20220802171248/https://go.dev/dl/go1.19.0.darwin-arm64.tar.gz">go1.19.0.darwin-arm64.tar.gz</a></td>
<td>Archive</td>
<td>macOS</td>
<td>ARM64</td>
<td>138MB</td>
<td><tt>859e0a54b7fcea89d9dd1ec52aab415ac8f169999e5fdfb0f0c15b577c4ead5e</tt></td>
</tr>
<tr class="highlight">
<td class="filename"><a class="download" href="/web/20220802171248/https://go.dev/dl/go1.19.0.darwin-arm64.pkg">go1.19.0.darwin-arm64.pkg</a></td>
<td>Installer</td>
<td>macOS</td>
<td>ARM64</td>
<td>139MB</td>
<td><tt>167cd1107886c36fda76404e82d1447ebd3efc472fd9a9fe3b2e872d41adf981</tt></td>
</tr>
<tr>
<td class="filename"><a class="download" href="/web/20220802171248/https://go.dev/dl/go1.19.0.linux-386.tar.gz">go1.19.0.linux-386.tar.gz</a></td>
<td>Archive</td>
<td>Linux</td>
<td>x86</td>
<td>114MB</td>
<td><tt>6f721fa3e8f823827b875b73579d8ceadd9053ad1db8eaa2393c084865fb4873</tt></td>
</tr>
<tr class="highlight">
<td class="filename"><a class="download" href="/web/20220802171248/https://go.dev/dl/go1.19.0.linux-amd64.tar.gz">go1.19.0.linux-amd64.tar.gz</a></td>
<td>Archive</td>
<td>Linux</td>
<td>x86-64</td>
<td>142MB</td>
<td><tt>464b6b66591f6cf055bc5df90a9750bf5fbc9d038722bb84a9d56a2bea974be6</tt></td>
</tr>
<tr>
<td class="filename"><a class="download" href="/web/20220802171248/https://go.dev/dl/go1.19.0.linux-arm64.tar.gz">go1.19.0.linux-arm64.tar.gz</a></td>
<td>Archive</td>
<td>Linux</td>
<td>ARM64</td>
<td>110MB</td>
<td><tt>efa97fac9574fc6ef6c9ff3e3758fb85f1439b046573bf434cccb5e012bd00c8</tt></td>
</tr>
<tr>
<td class="filename"><a class="download" href="/web/20220802171248/https://go.dev/dl/go1.19.0.linux-armv6l.tar.gz">go1.19.0.linux-armv6l.tar.gz</a></td>
<td>Archive</td>
<td>Linux</td>
<td>ARMv6</td>
<td>111MB</td>
<td><tt>25197c7d70c6bf2b34d7d7c29a2ff92ba1c393f0fb395218f1147aac2948fb93</tt></td>
</tr>
<tr>
<td class="filename"><a class="download" href="/web/20220802171248/https://go.dev/dl/go1.19.0.windows-386.zip">go1.19.0.windows-386.zip</a></td>
<td>Archive</td>
<td>Windows</td>
<td>x86</td>
<td>128MB</td>
<td><tt>45b80c0aca6a5a1f87f111d375db5afee3ce0a9fd5834041c39116e643ba1df2</tt></td>
</tr>
<tr>
<td class="filename"><a class="download" href="/web/20220802171248/https://go.dev/dl/go1.19.0.windows-386.msi">go1.19.0.windows-386.msi</a></td>
<td>Installer</td>
<td>Windows</td>
<td>x86</td>
<td>112MB</td>
<td><tt>39ed9b03c42b0ee99477377c27b1a809a73b96c627d86c4aedd133e92df5bd43</tt></td>
</tr>
<tr>
<td class="filename"><a class="download" href="/web/20220802171248/https://go.dev/dl/go1.19.0.windows-amd64.zip">go1.19.0.windows-amd64.zip</a></td>
<td>Archive</td>
<td>Windows</td>
<td>x86-64</td>
<td>156MB</td>
<td><tt>bcaaf966f91980d35ae93c37a8fe890e4ddfca19448c0d9f66c027d287e2823a</tt></td>
</tr>
<tr class="highlight">
<td class="filename"><a class="download" href="/web/20220802171248/https://go.dev/dl/go1.19.0.windows-amd64.msi">go1.19.0.windows-amd64.msi</a></td>
<td>Installer</td>
<td>Windows</td>
<td>x86-64</td>
<td>136MB</td>
<td><tt>0743b5fe0c6e5c67c7d131a8e24d4e7bdd5ef272dd13205dd7ae30cc2f464123</tt></td>
</tr>
<tr class="first"><th colspan="6" class="first">Other Ports</th></tr>
<tr>
<td class="filename"><a class="download" href="/web/20220802171248/https://go.dev/dl/go1.19.0.freebsd-386.tar.gz">go1.19.0.freebsd-386.tar.gz</a></td>
<td>Archive</td>
<td>FreeBSD</td>
<td>x86</td>
<td>114MB</td>
<td><tt>3989e2336dbb3dcf9197b8c0ef9227cdd1a134789d83095d20ebdc1d88edb9f0</tt></td>
</tr>
<tr>
<td class="filename"><a class="download" href="/web/20220802171248/https://go.dev/dl/go1.19.0.freebsd-amd64.tar.gz">go1.19.0.freebsd-amd64.tar.gz</a></td>
<td>Archive</td>
<td>FreeBSD</td>
<td>x86-64</td>
<td>142MB</td>
<td><tt>eca1a8f7b6ff6146efc285eed581096b12b59c1f0488bfe98ed053ab205267ca</tt></td>
</tr>
<tr>
<td class="filename"><a class="download" href="/web/20220802171248/https://go.dev/dl/go1.19.0.linux-ppc64le.tar.gz">go1.19.0.linux-ppc64le.tar.gz</a></td>
<td>Archive</td>
<td>Linux</td>
<td>ppc64le</td>
<td>110MB</td>
<td><tt>92bf5aa598a01b279d03847c32788a3a7e0a247a029dedb7c759811c2a4241fc</tt></td>
</tr>
<tr>
<td class="filename"><a class="download" href="/web/20220802171248/https://go.dev/dl/go1.19.0.linux-s390x.tar.gz">go1.19.0.linux-s390x.tar.gz</a></td>
<td>Archive</td>
<td>Linux</td>
<td>s390x</td>
<td>113MB</td>
<td><tt>58723eb8e3c7b9e8f5e97b2d38ace8fd62d9e5423eaa6cdb7ffe5f881cb11875</tt></td>
</tr>
<tr>
<td class="filename"><a class="download" href="/web/20220802171248/https://go.dev/dl/go1.19.0.windows-arm64.zip">go1.19.0.windows-arm64.zip</a></td>
<td>Archive</td>
<td>Windows</td>
<td>ARM64</td>
<td>122MB</td>
<td><tt>032f1f75a85bc595bf5eb8b48ec8e490121047915803ad62277586b2e13608f2</tt></td>
</tr>
<tr class="highlight">
<td class="filename"><a class="download" href="/web/20220802171248/https://go.dev/dl/go1.19.0.windows-arm64.msi">go1.19.0.windows-arm64.msi</a></td>
<td>Installer</td>
<td>Windows</td>
<td>ARM64</td>
<td>107MB</td>
<td><tt>6319ab5b50f0462efcb092eec3ac8c23a548652cdaa2755282aed4513b6188ee</tt></td>
</tr>
</table>

View file

@ -0,0 +1,192 @@
<table class="downloadtable">
<thead>
<tr class="first">
<th>File name</th>
<th>Kind</th>
<th>OS</th>
<th>Arch</th>
<th>Size</th>
<th>SHA256 Checksum</th>
</tr>
</thead>
<tr class="highlight">
<td class="filename"><a class="download" href="/dl/go1.19.2.src.tar.gz">go1.19.2.src.tar.gz</a></td>
<td>Source</td>
<td></td>
<td></td>
<td>25MB</td>
<td><tt>2ce930d70a931de660fdaf271d70192793b1b240272645bf0275779f6704df6b</tt></td>
</tr>
<tr>
<td class="filename"><a class="download" href="/dl/go1.19.2.darwin-amd64.tar.gz">go1.19.2.darwin-amd64.tar.gz</a></td>
<td>Archive</td>
<td>macOS</td>
<td>x86-64</td>
<td>144MB</td>
<td><tt>16f8047d7b627699b3773680098fbaf7cc962b7db02b3e02726f78c4db26dfde</tt></td>
</tr>
<tr class="highlight">
<td class="filename"><a class="download" href="/dl/go1.19.2.darwin-amd64.pkg">go1.19.2.darwin-amd64.pkg</a></td>
<td>Installer</td>
<td>macOS</td>
<td>x86-64</td>
<td>145MB</td>
<td><tt>2633f62c0b259a8fa4fb1fc967d28817816acca50b1fc354364cb381e1ccd2e5</tt></td>
</tr>
<tr>
<td class="filename"><a class="download" href="/dl/go1.19.2.darwin-arm64.tar.gz">go1.19.2.darwin-arm64.tar.gz</a></td>
<td>Archive</td>
<td>macOS</td>
<td>ARM64</td>
<td>138MB</td>
<td><tt>35d819df25197c0be45f36ce849b994bba3b0559b76d4538b910d28f6395c00d</tt></td>
</tr>
<tr class="highlight">
<td class="filename"><a class="download" href="/dl/go1.19.2.darwin-arm64.pkg">go1.19.2.darwin-arm64.pkg</a></td>
<td>Installer</td>
<td>macOS</td>
<td>ARM64</td>
<td>139MB</td>
<td><tt>6f985e50497ff3cba8f216e49c6c767f6ad52c8e505f375983714c14749cb954</tt></td>
</tr>
<tr>
<td class="filename"><a class="download" href="/dl/go1.19.2.linux-386.tar.gz">go1.19.2.linux-386.tar.gz</a></td>
<td>Archive</td>
<td>Linux</td>
<td>x86</td>
<td>114MB</td>
<td><tt>ba8c97965e0856c69c9ca2c86f96bec5bb21de43e6533e25494bb211d85cda1b</tt></td>
</tr>
<tr class="highlight">
<td class="filename"><a class="download" href="/dl/go1.19.2.linux-amd64.tar.gz">go1.19.2.linux-amd64.tar.gz</a></td>
<td>Archive</td>
<td>Linux</td>
<td>x86-64</td>
<td>142MB</td>
<td><tt>5e8c5a74fe6470dd7e055a461acda8bb4050ead8c2df70f227e3ff7d8eb7eeb6</tt></td>
</tr>
<tr>
<td class="filename"><a class="download" href="/dl/go1.19.2.linux-arm64.tar.gz">go1.19.2.linux-arm64.tar.gz</a></td>
<td>Archive</td>
<td>Linux</td>
<td>ARM64</td>
<td>110MB</td>
<td><tt>b62a8d9654436c67c14a0c91e931d50440541f09eb991a987536cb982903126d</tt></td>
</tr>
<tr>
<td class="filename"><a class="download" href="/dl/go1.19.2.linux-armv6l.tar.gz">go1.19.2.linux-armv6l.tar.gz</a></td>
<td>Archive</td>
<td>Linux</td>
<td>ARMv6</td>
<td>111MB</td>
<td><tt>f3ccec7816ecd704ebafd130b08b8ad97c55402a8193a107b63e9de12ab90118</tt></td>
</tr>
<tr>
<td class="filename"><a class="download" href="/dl/go1.19.2.windows-386.zip">go1.19.2.windows-386.zip</a></td>
<td>Archive</td>
<td>Windows</td>
<td>x86</td>
<td>128MB</td>
<td><tt>9355b09b23e9db33945a7ba45bb75981ab0bb6006713099732167722cf081b53</tt></td>
</tr>
<tr>
<td class="filename"><a class="download" href="/dl/go1.19.2.windows-386.msi">go1.19.2.windows-386.msi</a></td>
<td>Installer</td>
<td>Windows</td>
<td>x86</td>
<td>111MB</td>
<td><tt>98b0f92a6c74469353917afff914457d6809de164251ddb45831c3f0efc269b6</tt></td>
</tr>
<tr>
<td class="filename"><a class="download" href="/dl/go1.19.2.windows-amd64.zip">go1.19.2.windows-amd64.zip</a></td>
<td>Archive</td>
<td>Windows</td>
<td>x86-64</td>
<td>156MB</td>
<td><tt>e132d4f0518b0d417eb6cc5f182c3385f6d24bb2eebee2566cd1a7ab6097e3f2</tt></td>
</tr>
<tr class="highlight">
<td class="filename"><a class="download" href="/dl/go1.19.2.windows-amd64.msi">go1.19.2.windows-amd64.msi</a></td>
<td>Installer</td>
<td>Windows</td>
<td>x86-64</td>
<td>135MB</td>
<td><tt>249aba207df30133deadb3419b2476479189a2c0d324e72faee4e1f1a6209eca</tt></td>
</tr>
<tr class="first"><th colspan="6" class="first">Other Ports</th></tr>
<tr>
<td class="filename"><a class="download" href="/dl/go1.19.2.freebsd-386.tar.gz">go1.19.2.freebsd-386.tar.gz</a></td>
<td>Archive</td>
<td>FreeBSD</td>
<td>x86</td>
<td>114MB</td>
<td><tt>7831a406447a14d964212d07f68e77cf7fe7fb7286bade6eeb9fbea39b192984</tt></td>
</tr>
<tr>
<td class="filename"><a class="download" href="/dl/go1.19.2.freebsd-amd64.tar.gz">go1.19.2.freebsd-amd64.tar.gz</a></td>
<td>Archive</td>
<td>FreeBSD</td>
<td>x86-64</td>
<td>142MB</td>
<td><tt>d74c88430484d14826ec21161e3b9336bd021f502b6594c4dd00e9ec730ee51d</tt></td>
</tr>
<tr>
<td class="filename"><a class="download" href="/dl/go1.19.2.linux-ppc64le.tar.gz">go1.19.2.linux-ppc64le.tar.gz</a></td>
<td>Archive</td>
<td>Linux</td>
<td>ppc64le</td>
<td>110MB</td>
<td><tt>37e1d4342f7103aeb9babeabe8c71ef3dba23db28db525071119e94b2aa21d7d</tt></td>
</tr>
<tr>
<td class="filename"><a class="download" href="/dl/go1.19.2.linux-s390x.tar.gz">go1.19.2.linux-s390x.tar.gz</a></td>
<td>Archive</td>
<td>Linux</td>
<td>s390x</td>
<td>114MB</td>
<td><tt>51b45dec41295215df17f78e67d1a373b9dda97a5e539bed440974da5ffc97de</tt></td>
</tr>
<tr>
<td class="filename"><a class="download" href="/dl/go1.19.2.windows-arm64.zip">go1.19.2.windows-arm64.zip</a></td>
<td>Archive</td>
<td>Windows</td>
<td>ARM64</td>
<td>122MB</td>
<td><tt>4049435f77fb2a0642fd8740c588aadbcc446056e637e835a8e223fdb897cb3e</tt></td>
</tr>
<tr class="highlight">
<td class="filename"><a class="download" href="/dl/go1.19.2.windows-arm64.msi">go1.19.2.windows-arm64.msi</a></td>
<td>Installer</td>
<td>Windows</td>
<td>ARM64</td>
<td>106MB</td>
<td><tt>979e8bf619c6dc25d27bd62c3d2325e730ccb486849c274b35fdffe9bd0bb827</tt></td>
</tr>
</table>