
Adding upstream version 1.34.4.

Signed-off-by: Daniel Baumann <daniel@debian.org>
Daniel Baumann 2025-05-24 07:26:29 +02:00
parent e393c3af3f
commit 4978089aab
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
4963 changed files with 677545 additions and 0 deletions

plugins/inputs/sql/README.md
@@ -0,0 +1,206 @@
# SQL Input Plugin
This plugin reads metrics by performing SQL queries against a SQL
server. Different server types are supported and their settings might differ
(especially the connection parameters). Please check the list of [supported SQL
drivers](../../../docs/SQL_DRIVERS_INPUT.md) for the `driver` name and the
options for the data-source name (`dsn`).
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used to
modify metrics, tags, and fields, or create aliases and configure ordering, etc.
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
## Secret-store support
This plugin supports secrets from secret-stores for the `dsn` option.
See the [secret-store documentation][SECRETSTORE] for more details on how
to use them.
[SECRETSTORE]: ../../../docs/CONFIGURATION.md#secret-store-secrets
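As a minimal sketch, assuming a configured secret store with the ID `mystore`
holding a secret named `sql_dsn` (both names are hypothetical), the DSN could
be referenced like this:
```toml
[[inputs.sql]]
  driver = "mysql"
  ## Resolved at runtime from the secret store "mystore"; the store ID and
  ## the secret key are example names.
  dsn = "@{mystore:sql_dsn}"
```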
## Configuration
```toml @sample.conf
# Read metrics from SQL queries
[[inputs.sql]]
## Database Driver
## See https://github.com/influxdata/telegraf/blob/master/docs/SQL_DRIVERS_INPUT.md for
## a list of supported drivers.
driver = "mysql"
## Data source name for connecting
## The syntax and supported options depend on the selected driver.
dsn = "username:password@tcp(mysqlserver:3307)/dbname?param=value"
## Timeout for any operation
## Note that the timeout for queries is per query, not per gather.
# timeout = "5s"
## Connection time limits
## By default the maximum idle time and maximum lifetime of a connection are unlimited, i.e. the connections
## will not be closed automatically. If you specify a positive time, the connections will be closed after
## idling or existing for at least that amount of time, respectively.
# connection_max_idle_time = "0s"
# connection_max_life_time = "0s"
## Connection count limits
## By default the number of open connections is not limited and the number of maximum idle connections
## will be inferred from the number of queries specified. If you specify a positive number for either of the
## two options, connections will be closed when reaching the specified limit. The number of idle connections
## will be clipped to the maximum number of open connections, if such a limit is set.
# connection_max_open = 0
# connection_max_idle = auto
## Specifies plugin behavior regarding disconnected servers
## Available choices:
## - error: telegraf will return an error on startup if one of the servers is unreachable
## - ignore: telegraf will ignore unreachable servers on both startup and gather
# disconnected_servers_behavior = "error"
[[inputs.sql.query]]
## Query to perform on the server
query="SELECT user,state,latency,score FROM Scoreboard WHERE application > 0"
## Instead of specifying the query directly, you can reference a file containing the SQL query here.
## Only one of 'query' and 'query_script' can be specified!
# query_script = "/path/to/sql/script.sql"
## Name of the measurement
## In case both 'measurement' and 'measurement_column' are given, the latter takes precedence.
# measurement = "sql"
## Column name containing the name of the measurement
## If given, this will take precedence over the 'measurement' setting. In case a query result
## does not contain the specified column, we fall back to the 'measurement' setting.
# measurement_column = ""
## Column name containing the time of the measurement
## If omitted, the time of the query will be used.
# time_column = ""
## Format of the time contained in 'time_column'
## The time must be 'unix', 'unix_ms', 'unix_us', 'unix_ns', or a golang time format.
## See https://golang.org/pkg/time/#Time.Format for details.
# time_format = "unix"
## Column names containing tags
## An empty include list will reject all columns and an empty exclude list will not exclude any column.
## I.e. by default no columns will be returned as tags and the tags are empty.
# tag_columns_include = []
# tag_columns_exclude = []
## Column names containing fields (explicit types)
## Convert the given columns to the corresponding type. Explicit type conversions take precedence over
## the automatic (driver-based) conversion below.
## NOTE: Columns should not be specified for multiple types or the resulting type is undefined.
# field_columns_float = []
# field_columns_int = []
# field_columns_uint = []
# field_columns_bool = []
# field_columns_string = []
## Column names containing fields (automatic types)
## An empty include list is equivalent to '[*]' and all returned columns will be accepted. An empty
## exclude list will not exclude any column. I.e. by default all columns will be returned as fields.
## NOTE: We rely on the database driver to perform automatic datatype conversion.
# field_columns_include = []
# field_columns_exclude = []
```
## Options
### Driver
The `driver` and `dsn` options specify how to connect to the database. Because
the `dsn` format and its values vary with the `driver`, refer to the list of
[supported SQL drivers](../../../docs/SQL_DRIVERS_INPUT.md) for possible
values and more details.
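For example, a PostgreSQL-style setup might look like the following sketch;
the host, credentials and database name are placeholders:
```toml
[[inputs.sql]]
  ## "postgres" is an alias resolved to the pgx driver
  driver = "postgres"
  dsn = "postgres://telegraf:mypassword@localhost:5432/metrics"
```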
### Connection limits
With these options you can limit the number of connections kept open by this
plugin. Details about the exact workings can be found in the [golang sql
documentation](https://golang.org/pkg/database/sql/#DB.SetConnMaxIdleTime).
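As an illustrative sketch, the following keeps at most four connections open
and recycles them after an hour; the values are arbitrary examples:
```toml
[[inputs.sql]]
  driver = "mysql"
  dsn = "username:password@tcp(mysqlserver:3307)/dbname"
  ## Close connections after ten minutes of idling
  connection_max_idle_time = "10m"
  ## Close connections existing for more than one hour
  connection_max_life_time = "1h"
  ## Keep at most four connections open
  connection_max_open = 4
```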
### Query sections
Multiple `query` sections can be specified for this plugin. Each specified query
will first be prepared on the server and then executed in every interval using
the column mappings specified. Please note that `tag` and `field` columns are
not exclusive, i.e. a column can be added to both. When using both `include` and
`exclude` lists, the `exclude` list takes precedence over the `include`
list, i.e. if you specify `foo` in both lists, `foo` will _never_ pass the
filter. In case any of the columns specified in `measurement_column` or
`time_column` are _not_ returned by the query, the plugin falls back to the
documented defaults. Fields or tags listed in an include option but missing
from the query result are silently ignored.
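To illustrate the filter precedence, the following (hypothetical) query section
turns the `hostname` column into a tag and drops all `tmp_*` columns from the
fields, even if an include list also matched them:
```toml
[[inputs.sql.query]]
  query = "SELECT * FROM Scoreboard"
  tag_columns_include = ["hostname"]
  ## 'exclude' takes precedence: columns matching both lists never pass
  field_columns_exclude = ["hostname", "tmp_*"]
```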
## Types
This plugin relies on the driver to do the type conversion. For the different
properties of the metric the following types are accepted.
### Measurement
Only columns of type `string` are accepted.
### Time
For the metric time, columns of type `time` are accepted directly. For numeric
columns, `time_format` should be set to any of `unix`, `unix_ms`, `unix_ns` or
`unix_us` accordingly. By default a timestamp in `unix` format is
expected. For string columns, please specify the `time_format` accordingly. See
the [golang time documentation](https://golang.org/pkg/time/#Time.Format) for
details.
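For instance, a query returning the time as a string column named
`created_at` (a hypothetical column) could be parsed with a golang layout:
```toml
[[inputs.sql.query]]
  query = "SELECT created_at, value FROM samples"
  time_column = "created_at"
  ## Golang reference-time layout matching e.g. '2021-05-17 22:04:45'
  time_format = "2006-01-02 15:04:05"
```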
### Tags
For tags, columns with textual values (`string` and `bytes`), signed and unsigned
integers (8, 16, 32 and 64 bit), floating-point (32 and 64 bit), `boolean` and
`time` values are accepted. Those values will be converted to string.
### Fields
For fields, columns with textual values (`string` and `bytes`), signed and
unsigned integers (8, 16, 32 and 64 bit), floating-point (32 and 64 bit),
`boolean` and `time` values are accepted. Here `bytes` will be converted to
`string`, signed and unsigned integer values will be converted to `int64` or
`uint64` respectively. Floating-point values are converted to `float64` and
`time` is converted to a nanosecond timestamp of type `int64`.
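If the automatic conversion does not produce the desired type, an explicit
conversion can override it, e.g. forcing a hypothetical `latency` column to
float and `state` to string:
```toml
[[inputs.sql.query]]
  query = "SELECT user, state, latency FROM Scoreboard"
  ## Explicit conversions take precedence over the driver-based ones
  field_columns_float = ["latency"]
  field_columns_string = ["state"]
```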
## Example Output
Using the [MariaDB sample database][maria-sample] and the configuration
```toml
[[inputs.sql]]
driver = "mysql"
dsn = "root:password@/nation"
[[inputs.sql.query]]
query="SELECT * FROM guests"
measurement = "nation"
tag_columns_include = ["name"]
field_columns_exclude = ["name"]
```
Telegraf will output the following metrics:
```text
nation,host=Hugin,name=John guest_id=1i 1611332164000000000
nation,host=Hugin,name=Jane guest_id=2i 1611332164000000000
nation,host=Hugin,name=Jean guest_id=3i 1611332164000000000
nation,host=Hugin,name=Storm guest_id=4i 1611332164000000000
nation,host=Hugin,name=Beast guest_id=5i 1611332164000000000
```
[maria-sample]: https://www.mariadbtutorial.com/getting-started/mariadb-sample-database
## Metrics
The format of the metrics produced by this plugin depends on the queries and
the column mappings configured in the `query` sections.

plugins/inputs/sql/drivers.go
@@ -0,0 +1,13 @@
package sql
import (
// Blank imports to register the drivers
_ "github.com/ClickHouse/clickhouse-go/v2"
_ "github.com/IBM/nzgo/v12"
_ "github.com/SAP/go-hdb/driver"
_ "github.com/apache/arrow-go/v18/arrow/flight/flightsql/driver"
_ "github.com/go-sql-driver/mysql"
_ "github.com/jackc/pgx/v4/stdlib"
_ "github.com/microsoft/go-mssqldb"
_ "github.com/sijms/go-ora/v2"
)

plugins/inputs/sql/drivers_sqlite.go
@@ -0,0 +1,8 @@
//go:build !mips && !mipsle && !mips64 && !ppc64 && !riscv64 && !loong64 && !mips64le && !(windows && (386 || arm))
package sql
import (
// Blank imports to register the sqlite driver
_ "modernc.org/sqlite"
)

plugins/inputs/sql/sample.conf
@@ -0,0 +1,83 @@
# Read metrics from SQL queries
[[inputs.sql]]
## Database Driver
## See https://github.com/influxdata/telegraf/blob/master/docs/SQL_DRIVERS_INPUT.md for
## a list of supported drivers.
driver = "mysql"
## Data source name for connecting
## The syntax and supported options depend on the selected driver.
dsn = "username:password@tcp(mysqlserver:3307)/dbname?param=value"
## Timeout for any operation
## Note that the timeout for queries is per query, not per gather.
# timeout = "5s"
## Connection time limits
## By default the maximum idle time and maximum lifetime of a connection are unlimited, i.e. the connections
## will not be closed automatically. If you specify a positive time, the connections will be closed after
## idling or existing for at least that amount of time, respectively.
# connection_max_idle_time = "0s"
# connection_max_life_time = "0s"
## Connection count limits
## By default the number of open connections is not limited and the number of maximum idle connections
## will be inferred from the number of queries specified. If you specify a positive number for either of the
## two options, connections will be closed when reaching the specified limit. The number of idle connections
## will be clipped to the maximum number of open connections, if such a limit is set.
# connection_max_open = 0
# connection_max_idle = auto
## Specifies plugin behavior regarding disconnected servers
## Available choices:
## - error: telegraf will return an error on startup if one of the servers is unreachable
## - ignore: telegraf will ignore unreachable servers on both startup and gather
# disconnected_servers_behavior = "error"
[[inputs.sql.query]]
## Query to perform on the server
query="SELECT user,state,latency,score FROM Scoreboard WHERE application > 0"
## Instead of specifying the query directly, you can reference a file containing the SQL query here.
## Only one of 'query' and 'query_script' can be specified!
# query_script = "/path/to/sql/script.sql"
## Name of the measurement
## In case both 'measurement' and 'measurement_column' are given, the latter takes precedence.
# measurement = "sql"
## Column name containing the name of the measurement
## If given, this will take precedence over the 'measurement' setting. In case a query result
## does not contain the specified column, we fall back to the 'measurement' setting.
# measurement_column = ""
## Column name containing the time of the measurement
## If omitted, the time of the query will be used.
# time_column = ""
## Format of the time contained in 'time_column'
## The time must be 'unix', 'unix_ms', 'unix_us', 'unix_ns', or a golang time format.
## See https://golang.org/pkg/time/#Time.Format for details.
# time_format = "unix"
## Column names containing tags
## An empty include list will reject all columns and an empty exclude list will not exclude any column.
## I.e. by default no columns will be returned as tags and the tags are empty.
# tag_columns_include = []
# tag_columns_exclude = []
## Column names containing fields (explicit types)
## Convert the given columns to the corresponding type. Explicit type conversions take precedence over
## the automatic (driver-based) conversion below.
## NOTE: Columns should not be specified for multiple types or the resulting type is undefined.
# field_columns_float = []
# field_columns_int = []
# field_columns_uint = []
# field_columns_bool = []
# field_columns_string = []
## Column names containing fields (automatic types)
## An empty include list is equivalent to '[*]' and all returned columns will be accepted. An empty
## exclude list will not exclude any column. I.e. by default all columns will be returned as fields.
## NOTE: We rely on the database driver to perform automatic datatype conversion.
# field_columns_include = []
# field_columns_exclude = []

plugins/inputs/sql/sql.go
@@ -0,0 +1,546 @@
//go:generate ../../../tools/readme_config_includer/generator
package sql
import (
"context"
dbsql "database/sql"
_ "embed"
"errors"
"fmt"
"os"
"sort"
"strings"
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/filter"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/choice"
"github.com/influxdata/telegraf/plugins/inputs"
)
//go:embed sample.conf
var sampleConfig string
var disconnectedServersBehavior = []string{"error", "ignore"}
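// magicIdleCount is a large negative sentinel marking 'connection_max_idle'
// as "not set by the user" so Init can auto-compute the idle connection count.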
const magicIdleCount = -int(^uint(0) >> 1)
type SQL struct {
Driver string `toml:"driver"`
Dsn config.Secret `toml:"dsn"`
Timeout config.Duration `toml:"timeout"`
MaxIdleTime config.Duration `toml:"connection_max_idle_time"`
MaxLifetime config.Duration `toml:"connection_max_life_time"`
MaxOpenConnections int `toml:"connection_max_open"`
MaxIdleConnections int `toml:"connection_max_idle"`
Queries []query `toml:"query"`
Log telegraf.Logger `toml:"-"`
DisconnectedServersBehavior string `toml:"disconnected_servers_behavior"`
driverName string
db *dbsql.DB
serverConnected bool
}
type query struct {
Query string `toml:"query"`
Script string `toml:"query_script"`
Measurement string `toml:"measurement"`
MeasurementColumn string `toml:"measurement_column"`
TimeColumn string `toml:"time_column"`
TimeFormat string `toml:"time_format"`
TagColumnsInclude []string `toml:"tag_columns_include"`
TagColumnsExclude []string `toml:"tag_columns_exclude"`
FieldColumnsInclude []string `toml:"field_columns_include"`
FieldColumnsExclude []string `toml:"field_columns_exclude"`
FieldColumnsFloat []string `toml:"field_columns_float"`
FieldColumnsInt []string `toml:"field_columns_int"`
FieldColumnsUint []string `toml:"field_columns_uint"`
FieldColumnsBool []string `toml:"field_columns_bool"`
FieldColumnsString []string `toml:"field_columns_string"`
statement *dbsql.Stmt
tagFilter filter.Filter
fieldFilter filter.Filter
fieldFilterFloat filter.Filter
fieldFilterInt filter.Filter
fieldFilterUint filter.Filter
fieldFilterBool filter.Filter
fieldFilterString filter.Filter
}
func (*SQL) SampleConfig() string {
return sampleConfig
}
func (s *SQL) Init() error {
// Option handling
if s.Driver == "" {
return errors.New("missing SQL driver option")
}
if err := s.checkDSN(); err != nil {
return err
}
if s.Timeout <= 0 {
s.Timeout = config.Duration(5 * time.Second)
}
if s.MaxIdleConnections == magicIdleCount {
// Determine the number by the number of queries + the golang default value
s.MaxIdleConnections = len(s.Queries) + 2
}
for i, q := range s.Queries {
if q.Query == "" && q.Script == "" {
return errors.New("neither 'query' nor 'query_script' specified")
}
if q.Query != "" && q.Script != "" {
return errors.New("only one of 'query' and 'query_script' can be specified")
}
// In case we got a script, we should read the query now.
if q.Script != "" {
query, err := os.ReadFile(q.Script)
if err != nil {
return fmt.Errorf("reading script %q failed: %w", q.Script, err)
}
s.Queries[i].Query = string(query)
}
// Time format
if q.TimeFormat == "" {
s.Queries[i].TimeFormat = "unix"
}
// Compile the tag-filter
tagfilter, err := filter.NewIncludeExcludeFilterDefaults(q.TagColumnsInclude, q.TagColumnsExclude, false, false)
if err != nil {
return fmt.Errorf("creating tag filter failed: %w", err)
}
s.Queries[i].tagFilter = tagfilter
// Compile the explicit type field-filter
fieldfilterFloat, err := filter.NewIncludeExcludeFilterDefaults(q.FieldColumnsFloat, nil, false, false)
if err != nil {
return fmt.Errorf("creating field filter for float failed: %w", err)
}
s.Queries[i].fieldFilterFloat = fieldfilterFloat
fieldfilterInt, err := filter.NewIncludeExcludeFilterDefaults(q.FieldColumnsInt, nil, false, false)
if err != nil {
return fmt.Errorf("creating field filter for int failed: %w", err)
}
s.Queries[i].fieldFilterInt = fieldfilterInt
fieldfilterUint, err := filter.NewIncludeExcludeFilterDefaults(q.FieldColumnsUint, nil, false, false)
if err != nil {
return fmt.Errorf("creating field filter for uint failed: %w", err)
}
s.Queries[i].fieldFilterUint = fieldfilterUint
fieldfilterBool, err := filter.NewIncludeExcludeFilterDefaults(q.FieldColumnsBool, nil, false, false)
if err != nil {
return fmt.Errorf("creating field filter for bool failed: %w", err)
}
s.Queries[i].fieldFilterBool = fieldfilterBool
fieldfilterString, err := filter.NewIncludeExcludeFilterDefaults(q.FieldColumnsString, nil, false, false)
if err != nil {
return fmt.Errorf("creating field filter for string failed: %w", err)
}
s.Queries[i].fieldFilterString = fieldfilterString
// Compile the field-filter
fieldfilter, err := filter.NewIncludeExcludeFilter(q.FieldColumnsInclude, q.FieldColumnsExclude)
if err != nil {
return fmt.Errorf("creating field filter failed: %w", err)
}
s.Queries[i].fieldFilter = fieldfilter
if q.Measurement == "" {
s.Queries[i].Measurement = "sql"
}
}
// Derive the sql-framework driver name from our config name. This abstracts the actual driver
// from the database-type the user wants.
aliases := map[string]string{
"cockroach": "pgx",
"tidb": "mysql",
"mssql": "sqlserver",
"maria": "mysql",
"postgres": "pgx",
"oracle": "oracle",
}
s.driverName = s.Driver
if driver, ok := aliases[s.Driver]; ok {
s.driverName = driver
}
availDrivers := dbsql.Drivers()
if !choice.Contains(s.driverName, availDrivers) {
for d, r := range aliases {
if choice.Contains(r, availDrivers) {
availDrivers = append(availDrivers, d)
}
}
// Sort the list of drivers and make them unique
sort.Strings(availDrivers)
last := 0
for _, d := range availDrivers {
if d != availDrivers[last] {
last++
availDrivers[last] = d
}
}
availDrivers = availDrivers[:last+1]
return fmt.Errorf("driver %q not supported use one of %v", s.Driver, availDrivers)
}
if s.DisconnectedServersBehavior == "" {
s.DisconnectedServersBehavior = "error"
}
if !choice.Contains(s.DisconnectedServersBehavior, disconnectedServersBehavior) {
return fmt.Errorf("%q is not a valid value for disconnected_servers_behavior", s.DisconnectedServersBehavior)
}
return nil
}
func (s *SQL) Start(telegraf.Accumulator) error {
if err := s.setupConnection(); err != nil {
return err
}
if err := s.ping(); err != nil {
if s.DisconnectedServersBehavior == "error" {
return err
}
s.Log.Errorf("unable to connect to database: %s", err)
}
if s.serverConnected {
s.prepareStatements()
}
return nil
}
func (s *SQL) Gather(acc telegraf.Accumulator) error {
// During plugin startup the server might not have been reachable, so retry
// pinging it in this collection cycle. The statements only need to be
// prepared once, after the first successful connection.
if !s.serverConnected {
if err := s.ping(); err != nil {
return err
}
s.prepareStatements()
}
var wg sync.WaitGroup
tstart := time.Now()
for _, q := range s.Queries {
wg.Add(1)
go func(q query) {
defer wg.Done()
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(s.Timeout))
defer cancel()
if err := s.executeQuery(ctx, acc, q, tstart); err != nil {
acc.AddError(err)
}
}(q)
}
wg.Wait()
s.Log.Debugf("Executed %d queries in %s", len(s.Queries), time.Since(tstart).String())
return nil
}
func (s *SQL) Stop() {
// Free the statements
for _, q := range s.Queries {
if q.statement != nil {
if err := q.statement.Close(); err != nil {
s.Log.Errorf("closing statement for query %q failed: %v", q.Query, err)
}
}
}
// Close the connection to the server
if s.db != nil {
if err := s.db.Close(); err != nil {
s.Log.Errorf("closing database connection failed: %v", err)
}
}
}
func (s *SQL) setupConnection() error {
// Connect to the database server
dsnSecret, err := s.Dsn.Get()
if err != nil {
return fmt.Errorf("getting DSN failed: %w", err)
}
dsn := dsnSecret.String()
dsnSecret.Destroy()
s.Log.Debug("Connecting...")
s.db, err = dbsql.Open(s.driverName, dsn)
if err != nil {
// Return early since the error most likely indicates an invalid DSN format
return err
}
// Set the connection limits
// s.db.SetConnMaxIdleTime(time.Duration(s.MaxIdleTime)) // Requires go >= 1.15
s.db.SetConnMaxLifetime(time.Duration(s.MaxLifetime))
s.db.SetMaxOpenConns(s.MaxOpenConnections)
s.db.SetMaxIdleConns(s.MaxIdleConnections)
return nil
}
func (s *SQL) ping() error {
// Test if the connection can be established
s.Log.Debug("Testing connectivity...")
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(s.Timeout))
err := s.db.PingContext(ctx)
cancel()
if err != nil {
return fmt.Errorf("unable to connect to database: %w", err)
}
s.serverConnected = true
return nil
}
func (s *SQL) prepareStatements() {
// Prepare the statements
for i, q := range s.Queries {
s.Log.Debugf("Preparing statement %q...", q.Query)
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(s.Timeout))
stmt, err := s.db.PrepareContext(ctx, q.Query)
cancel()
if err != nil {
// Some database drivers or databases do not support prepare
// statements and report an error here. However, we can still
// execute unprepared queries for those setups so do not bail-out
// here but simply leave the `statement` with a `nil` value
// indicating no prepared statement.
s.Log.Warnf("preparing query %q failed: %s; falling back to unprepared query", q.Query, err)
continue
}
s.Queries[i].statement = stmt
}
}
func (s *SQL) executeQuery(ctx context.Context, acc telegraf.Accumulator, q query, tquery time.Time) error {
// Execute the query either prepared or unprepared
var rows *dbsql.Rows
if q.statement != nil {
// Use the previously prepared query
var err error
rows, err = q.statement.QueryContext(ctx)
if err != nil {
return err
}
} else {
// Fallback to unprepared query
var err error
rows, err = s.db.Query(q.Query)
if err != nil {
return err
}
}
defer rows.Close()
// Handle the rows
columnNames, err := rows.Columns()
if err != nil {
return err
}
rowCount, err := q.parse(acc, rows, tquery, s.Log)
s.Log.Debugf("Received %d rows and %d columns for query %q", rowCount, len(columnNames), q.Query)
return err
}
func (s *SQL) checkDSN() error {
if s.Dsn.Empty() {
return errors.New("missing data source name (DSN) option")
}
return nil
}
func (q *query) parse(acc telegraf.Accumulator, rows *dbsql.Rows, t time.Time, logger telegraf.Logger) (int, error) {
columnNames, err := rows.Columns()
if err != nil {
return 0, err
}
// Prepare the list of datapoints according to the received row
columnData := make([]interface{}, len(columnNames))
columnDataPtr := make([]interface{}, len(columnNames))
for i := range columnData {
columnDataPtr[i] = &columnData[i]
}
rowCount := 0
for rows.Next() {
measurement := q.Measurement
timestamp := t
tags := make(map[string]string)
fields := make(map[string]interface{}, len(columnNames))
// Do the parsing with (hopefully) automatic type conversion
if err := rows.Scan(columnDataPtr...); err != nil {
return 0, err
}
for i, name := range columnNames {
if q.MeasurementColumn != "" && name == q.MeasurementColumn {
switch raw := columnData[i].(type) {
case string:
measurement = raw
case []byte:
measurement = string(raw)
default:
return 0, fmt.Errorf("measurement column type \"%T\" unsupported", columnData[i])
}
}
if q.TimeColumn != "" && name == q.TimeColumn {
var fieldvalue interface{}
var skipParsing bool
switch v := columnData[i].(type) {
case string, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64:
fieldvalue = v
case []byte:
fieldvalue = string(v)
case time.Time:
timestamp = v
skipParsing = true
case fmt.Stringer:
fieldvalue = v.String()
default:
return 0, fmt.Errorf("time column %q of type \"%T\" unsupported", name, columnData[i])
}
if !skipParsing {
if timestamp, err = internal.ParseTimestamp(q.TimeFormat, fieldvalue, nil); err != nil {
return 0, fmt.Errorf("parsing time failed: %w", err)
}
}
}
if q.tagFilter.Match(name) {
tagvalue, err := internal.ToString(columnData[i])
if err != nil {
return 0, fmt.Errorf("converting tag column %q failed: %w", name, err)
}
if v := strings.TrimSpace(tagvalue); v != "" {
tags[name] = v
}
}
// Explicit type conversions take precedence
if q.fieldFilterFloat.Match(name) {
v, err := internal.ToFloat64(columnData[i])
if err != nil {
return 0, fmt.Errorf("converting field column %q to float failed: %w", name, err)
}
fields[name] = v
continue
}
if q.fieldFilterInt.Match(name) {
v, err := internal.ToInt64(columnData[i])
if err != nil {
if !errors.Is(err, internal.ErrOutOfRange) {
return 0, fmt.Errorf("converting field column %q to int failed: %w", name, err)
}
logger.Warnf("field column %q: %v", name, err)
}
fields[name] = v
continue
}
if q.fieldFilterUint.Match(name) {
v, err := internal.ToUint64(columnData[i])
if err != nil {
if !errors.Is(err, internal.ErrOutOfRange) {
return 0, fmt.Errorf("converting field column %q to uint failed: %w", name, err)
}
logger.Warnf("field column %q: %v", name, err)
}
fields[name] = v
continue
}
if q.fieldFilterBool.Match(name) {
v, err := internal.ToBool(columnData[i])
if err != nil {
return 0, fmt.Errorf("converting field column %q to bool failed: %w", name, err)
}
fields[name] = v
continue
}
if q.fieldFilterString.Match(name) {
v, err := internal.ToString(columnData[i])
if err != nil {
return 0, fmt.Errorf("converting field column %q to string failed: %w", name, err)
}
fields[name] = v
continue
}
// Try automatic conversion for all remaining fields
if q.fieldFilter.Match(name) {
var fieldvalue interface{}
switch v := columnData[i].(type) {
case string, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64, bool:
fieldvalue = v
case []byte:
fieldvalue = string(v)
case time.Time:
fieldvalue = v.UnixNano()
case nil:
fieldvalue = nil
case fmt.Stringer:
fieldvalue = v.String()
default:
return 0, fmt.Errorf("field column %q of type \"%T\" unsupported", name, columnData[i])
}
if fieldvalue != nil {
fields[name] = fieldvalue
}
}
}
acc.AddFields(measurement, fields, tags, timestamp)
rowCount++
}
if err := rows.Err(); err != nil {
return rowCount, err
}
return rowCount, nil
}
func init() {
inputs.Add("sql", func() telegraf.Input {
return &SQL{
MaxIdleTime: config.Duration(0), // unlimited
MaxLifetime: config.Duration(0), // unlimited
MaxOpenConnections: 0, // unlimited
MaxIdleConnections: magicIdleCount, // will trigger auto calculation
}
})
}

plugins/inputs/sql/sql_test.go
@@ -0,0 +1,306 @@
package sql
import (
"fmt"
"path/filepath"
"testing"
"time"
"github.com/docker/go-connections/nat"
"github.com/stretchr/testify/require"
"github.com/testcontainers/testcontainers-go/wait"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/testutil"
)
func TestMariaDBIntegration(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
logger := testutil.Logger{}
port := "3306"
password := testutil.GetRandomString(32)
database := "foo"
// Determine the test-data mountpoint
testdata, err := filepath.Abs("testdata/mariadb/expected.sql")
require.NoError(t, err, "determining absolute path of test-data failed")
container := testutil.Container{
Image: "mariadb",
ExposedPorts: []string{port},
Env: map[string]string{
"MYSQL_ROOT_PASSWORD": password,
"MYSQL_DATABASE": database,
},
Files: map[string]string{
"/docker-entrypoint-initdb.d/expected.sql": testdata,
},
WaitingFor: wait.ForAll(
wait.ForLog("mariadbd: ready for connections.").WithOccurrence(2),
wait.ForListeningPort(nat.Port(port)),
),
}
require.NoError(t, container.Start(), "failed to start container")
defer container.Terminate()
// Define the testset
var testset = []struct {
name string
queries []query
expected []telegraf.Metric
}{
{
name: "metric_one",
queries: []query{
{
Query: "SELECT * FROM metric_one",
TagColumnsInclude: []string{"tag_*"},
FieldColumnsExclude: []string{"tag_*", "timestamp"},
TimeColumn: "timestamp",
TimeFormat: "2006-01-02 15:04:05",
},
},
expected: []telegraf.Metric{
testutil.MustMetric(
"sql",
map[string]string{
"tag_one": "tag1",
"tag_two": "tag2",
},
map[string]interface{}{
"int64_one": int64(1234),
"int64_two": int64(2345),
},
time.Date(2021, 5, 17, 22, 4, 45, 0, time.UTC),
),
},
},
}
for _, tt := range testset {
t.Run(tt.name, func(t *testing.T) {
// Setup the plugin-under-test
dsn := fmt.Sprintf("root:%s@tcp(%s:%s)/%s", password, container.Address, container.Ports[port], database)
secret := config.NewSecret([]byte(dsn))
plugin := &SQL{
Driver: "maria",
Dsn: secret,
Queries: tt.queries,
Log: logger,
}
var acc testutil.Accumulator
// Startup the plugin
require.NoError(t, plugin.Init())
require.NoError(t, plugin.Start(&acc))
// Gather
require.NoError(t, plugin.Gather(&acc))
require.Empty(t, acc.Errors)
// Stopping the plugin
plugin.Stop()
// Do the comparison
testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics())
})
}
}
func TestPostgreSQLIntegration(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
logger := testutil.Logger{}
port := "5432"
password := testutil.GetRandomString(32)
database := "foo"
// Determine the test-data mountpoint
testdata, err := filepath.Abs("testdata/postgres/expected.sql")
require.NoError(t, err, "determining absolute path of test-data failed")
container := testutil.Container{
Image: "postgres",
ExposedPorts: []string{port},
Env: map[string]string{
"POSTGRES_PASSWORD": password,
"POSTGRES_DB": database,
},
Files: map[string]string{
"/docker-entrypoint-initdb.d/expected.sql": testdata,
},
WaitingFor: wait.ForAll(
wait.ForLog("database system is ready to accept connections").WithOccurrence(2),
wait.ForListeningPort(nat.Port(port)),
),
}
require.NoError(t, container.Start(), "failed to start container")
defer container.Terminate()
// Define the testset
var testset = []struct {
name string
queries []query
expected []telegraf.Metric
}{
{
name: "metric_one",
queries: []query{
{
Query: "SELECT * FROM metric_one",
TagColumnsInclude: []string{"tag_*"},
FieldColumnsExclude: []string{"tag_*", "timestamp"},
TimeColumn: "timestamp",
TimeFormat: "2006-01-02 15:04:05",
},
},
expected: []telegraf.Metric{
testutil.MustMetric(
"sql",
map[string]string{
"tag_one": "tag1",
"tag_two": "tag2",
},
map[string]interface{}{
"int64_one": int64(1234),
"int64_two": int64(2345),
},
time.Date(2021, 5, 17, 22, 4, 45, 0, time.UTC),
),
},
},
}
for _, tt := range testset {
t.Run(tt.name, func(t *testing.T) {
// Setup the plugin-under-test
dsn := fmt.Sprintf("postgres://postgres:%s@%s:%s/%s", password, container.Address, container.Ports[port], database)
secret := config.NewSecret([]byte(dsn))
plugin := &SQL{
Driver: "pgx",
Dsn: secret,
Queries: tt.queries,
Log: logger,
}
var acc testutil.Accumulator
// Startup the plugin
require.NoError(t, plugin.Init())
require.NoError(t, plugin.Start(&acc))
// Gather
require.NoError(t, plugin.Gather(&acc))
require.Empty(t, acc.Errors)
// Stopping the plugin
plugin.Stop()
// Do the comparison
testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics())
})
}
}
func TestClickHouseIntegration(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
logger := testutil.Logger{}
port := "9000"
user := "default"
// Determine the test-data mountpoint
testdata, err := filepath.Abs("testdata/clickhouse/expected.sql")
require.NoError(t, err, "determining absolute path of test-data failed")
container := testutil.Container{
Image: "yandex/clickhouse-server",
ExposedPorts: []string{port, "8123"},
Files: map[string]string{
"/docker-entrypoint-initdb.d/expected.sql": testdata,
},
WaitingFor: wait.ForAll(
wait.NewHTTPStrategy("/").WithPort(nat.Port("8123")),
wait.ForListeningPort(nat.Port(port)),
wait.ForLog("Saved preprocessed configuration to '/var/lib/clickhouse/preprocessed_configs/users.xml'.").WithOccurrence(2),
),
}
require.NoError(t, container.Start(), "failed to start container")
defer container.Terminate()
// Define the testset
var testset = []struct {
name string
queries []query
expected []telegraf.Metric
}{
{
name: "metric_one",
queries: []query{
{
Query: "SELECT * FROM default.metric_one",
TagColumnsInclude: []string{"tag_*"},
FieldColumnsExclude: []string{"tag_*", "timestamp"},
TimeColumn: "timestamp",
TimeFormat: "unix",
},
},
expected: []telegraf.Metric{
testutil.MustMetric(
"sql",
map[string]string{
"tag_one": "tag1",
"tag_two": "tag2",
},
map[string]interface{}{
"int64_one": int64(1234),
"int64_two": int64(2345),
},
time.Unix(1621289085, 0),
),
},
},
}
for _, tt := range testset {
t.Run(tt.name, func(t *testing.T) {
// Setup the plugin-under-test
dsn := fmt.Sprintf("tcp://%s:%s?username=%s", container.Address, container.Ports[port], user)
secret := config.NewSecret([]byte(dsn))
plugin := &SQL{
Driver: "clickhouse",
Dsn: secret,
Queries: tt.queries,
Log: logger,
}
var acc testutil.Accumulator
// Startup the plugin
require.NoError(t, plugin.Init())
require.NoError(t, plugin.Start(&acc))
// Gather
require.NoError(t, plugin.Gather(&acc))
require.Empty(t, acc.Errors)
// Stopping the plugin
plugin.Stop()
// Do the comparison
testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics())
})
}
}

plugins/inputs/sql/testdata/clickhouse/expected.sql
@@ -0,0 +1,15 @@
CREATE TABLE IF NOT EXISTS default.metric_one (
tag_one String,
tag_two String,
int64_one Int64,
int64_two Int64,
timestamp Int64
) ENGINE MergeTree() ORDER BY timestamp;
INSERT INTO default.metric_one (
tag_one,
tag_two,
int64_one,
int64_two,
timestamp
) VALUES ('tag1', 'tag2', 1234, 2345, 1621289085);

plugins/inputs/sql/testdata/mariadb/expected.sql
@@ -0,0 +1,36 @@
/*!40101 SET @saved_cs_client = @@character_set_client */;
/*!40101 SET character_set_client = utf8 */;
CREATE TABLE `bar` (
`baz` int(11) DEFAULT NULL
);
/*!40101 SET character_set_client = @saved_cs_client */;
INSERT INTO `bar` VALUES (1);
/*!40101 SET @saved_cs_client = @@character_set_client */;
/*!40101 SET character_set_client = utf8 */;
CREATE TABLE `metric three` (
`timestamp` timestamp NOT NULL DEFAULT current_timestamp(),
`tag four` text DEFAULT NULL,
`string two` text DEFAULT NULL
);
/*!40101 SET character_set_client = @saved_cs_client */;
INSERT INTO `metric three` VALUES ('2021-05-17 22:04:45','tag4','string2');
/*!40101 SET @saved_cs_client = @@character_set_client */;
/*!40101 SET character_set_client = utf8 */;
CREATE TABLE `metric_one` (
`timestamp` timestamp NOT NULL DEFAULT current_timestamp(),
`tag_one` text DEFAULT NULL,
`tag_two` text DEFAULT NULL,
`int64_one` int(11) DEFAULT NULL,
`int64_two` int(11) DEFAULT NULL
);
/*!40101 SET character_set_client = @saved_cs_client */;
INSERT INTO `metric_one` VALUES ('2021-05-17 22:04:45','tag1','tag2',1234,2345);
/*!40101 SET @saved_cs_client = @@character_set_client */;
/*!40101 SET character_set_client = utf8 */;
CREATE TABLE `metric_two` (
`timestamp` timestamp NOT NULL DEFAULT current_timestamp(),
`tag_three` text DEFAULT NULL,
`string_one` text DEFAULT NULL
);
/*!40101 SET character_set_client = @saved_cs_client */;
INSERT INTO `metric_two` VALUES ('2021-05-17 22:04:45','tag3','string1');

plugins/inputs/sql/testdata/postgres/expected.sql
@@ -0,0 +1,41 @@
SET statement_timeout = 0;
SET lock_timeout = 0;
SET idle_in_transaction_session_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = on;
SELECT pg_catalog.set_config('search_path', '', false);
SET check_function_bodies = false;
SET xmloption = content;
SET client_min_messages = warning;
SET row_security = off;
SET default_tablespace = '';
SET default_table_access_method = heap;
CREATE TABLE public."metric three" (
"timestamp" timestamp without time zone,
"tag four" text,
"string two" text
);
ALTER TABLE public."metric three" OWNER TO postgres;
CREATE TABLE public.metric_one (
"timestamp" timestamp without time zone,
tag_one text,
tag_two text,
int64_one integer,
int64_two integer
);
ALTER TABLE public.metric_one OWNER TO postgres;
CREATE TABLE public.metric_two (
"timestamp" timestamp without time zone,
tag_three text,
string_one text
);
ALTER TABLE public.metric_two OWNER TO postgres;
COPY public."metric three" ("timestamp", "tag four", "string two") FROM stdin;
2021-05-17 22:04:45 tag4 string2
\.
COPY public.metric_one ("timestamp", tag_one, tag_two, int64_one, int64_two) FROM stdin;
2021-05-17 22:04:45 tag1 tag2 1234 2345
\.
COPY public.metric_two ("timestamp", tag_three, string_one) FROM stdin;
2021-05-17 22:04:45 tag3 string1
\.