
Adding upstream version 1.34.4.

Signed-off-by: Daniel Baumann <daniel@debian.org>
Daniel Baumann 2025-05-24 07:26:29 +02:00
parent e393c3af3f
commit 4978089aab
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
4963 changed files with 677545 additions and 0 deletions


@@ -0,0 +1,403 @@
# MySQL Input Plugin
This plugin gathers statistics from [MySQL][mysql] server instances.
> [!NOTE]
> To gather metrics from the performance schema, it must first be enabled in
> MySQL. See the performance schema [quick start][quick-start] for details.
⭐ Telegraf v0.1.1
🏷️ datastore
💻 all
[mysql]: https://www.mysql.com/
[quick-start]: https://dev.mysql.com/doc/refman/8.0/en/performance-schema-quick-start.html
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used to
modify metrics, tags, and fields or create aliases and configure ordering, etc.
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
## Configuration
```toml @sample.conf
# Read metrics from one or many mysql servers
[[inputs.mysql]]
## specify servers via a url matching:
## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]]
## see https://github.com/go-sql-driver/mysql#dsn-data-source-name
## e.g.
## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"]
## servers = ["user@tcp(127.0.0.1:3306)/?tls=false"]
#
## If no servers are specified, then localhost is used as the host.
servers = ["tcp(127.0.0.1:3306)/"]
## Selects the metric output format.
##
## This option exists to maintain backwards compatibility; if you have
## existing metrics, do not set or change this value until you are ready
## to migrate to the new format.
##
## If you do not have existing metrics from this plugin, set this to the
## latest version.
##
## Telegraf >=1.6: metric_version = 2
## <1.6: metric_version = 1 (or unset)
metric_version = 2
## if the list is empty, then metrics are gathered from all database tables
# table_schema_databases = []
## gather metrics from INFORMATION_SCHEMA.TABLES for databases provided
## in the list above
# gather_table_schema = false
## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST
# gather_process_list = false
## gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS
# gather_user_statistics = false
## gather auto_increment columns and max values from information schema
# gather_info_schema_auto_inc = false
## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS
# gather_innodb_metrics = false
## gather metrics from all channels from SHOW SLAVE STATUS command output
# gather_all_slave_channels = false
## gather metrics from SHOW SLAVE STATUS command output
# gather_slave_status = false
## gather metrics from SHOW REPLICA STATUS command output
# gather_replica_status = false
## use SHOW ALL SLAVES STATUS command output for MariaDB
## use SHOW ALL REPLICAS STATUS command if gather_replica_status is enabled
# mariadb_dialect = false
## gather metrics from SHOW BINARY LOGS command output
# gather_binary_logs = false
## gather metrics from SHOW GLOBAL VARIABLES command output
# gather_global_variables = true
## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE
# gather_table_io_waits = false
## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS
# gather_table_lock_waits = false
## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE
# gather_index_io_waits = false
## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS
# gather_event_waits = false
## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME
# gather_file_events_stats = false
## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST
# gather_perf_events_statements = false
#
## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME
# gather_perf_sum_per_acc_per_event = false
#
## list of events to be gathered for gather_perf_sum_per_acc_per_event
## in case of empty list all events will be gathered
# perf_summary_events = []
## the limits for metrics from perf_events_statements
# perf_events_statements_digest_text_limit = 120
# perf_events_statements_limit = 250
# perf_events_statements_time_limit = 86400
## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)
## example: interval_slow = "30m"
# interval_slow = ""
## Optional TLS Config (used if tls=custom parameter specified in server uri)
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
```
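The `servers` entries use the go-sql-driver/mysql DSN format linked above. As a
quick, standalone way to sanity-check a DSN before adding it to the config, the
driver's own parser can be used directly (a minimal sketch; the DSN shown is
just an example):

```go
package main

import (
	"fmt"
	"log"

	"github.com/go-sql-driver/mysql"
)

func main() {
	// Parse a DSN in the same format the servers option expects.
	cfg, err := mysql.ParseDSN("user:passwd@tcp(127.0.0.1:3306)/?tls=false")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.Net, cfg.Addr, cfg.User) // tcp 127.0.0.1:3306 user
}
```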
### String Data
Some fields may return string data. This is unhelpful for some outputs where
numeric data is required (e.g. Prometheus). In these cases, users can make use
of the enum processor to convert string values to numeric values. Below is an
example using the `slave_slave_io_running` field, which can have a variety of
string values:
```toml
[[processors.enum]]
namepass = ["mysql"]
[[processors.enum.mapping]]
field = "slave_slave_io_running"
dest = "slave_slave_io_running_int"
default = 4
[processors.enum.mapping.value_mappings]
Yes = 0
No = 1
Preparing = 2
Connecting = 3
```
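For reference, the mapping above behaves like the following lookup (a
standalone illustration, not plugin code); any state outside the table falls
back to the `default` value of 4:

```go
package main

import "fmt"

// slaveIORunningToInt mirrors the enum mapping above: known replication states
// map to fixed integers, anything else falls back to the default value 4.
func slaveIORunningToInt(state string) int {
	mapping := map[string]int{
		"Yes":        0,
		"No":         1,
		"Preparing":  2,
		"Connecting": 3,
	}
	if v, ok := mapping[state]; ok {
		return v
	}
	return 4
}

func main() {
	fmt.Println(slaveIORunningToInt("Yes"), slaveIORunningToInt("lost")) // 0 4
}
```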
### Metric Version
When `metric_version = 2`, a variety of field type issues and naming
inconsistencies are corrected. If you have existing data on the original
version, enabling this feature will cause a `field type error` when the data is
inserted into InfluxDB due to the change of types. For this reason, you should
keep `metric_version` unset until you are ready to migrate to the new format.
If preserving your old data is not required, you may wish to drop conflicting
measurements:
```sql
DROP SERIES FROM mysql
DROP SERIES FROM mysql_variables
DROP SERIES FROM mysql_innodb
```
Otherwise, migration can be performed using the following steps:
1. Duplicate your `mysql` plugin configuration and add a `name_suffix` and
`metric_version = 2`; this will result in collection using both the old and
new styles concurrently:
```toml
[[inputs.mysql]]
servers = ["tcp(127.0.0.1:3306)/"]
[[inputs.mysql]]
name_suffix = "_v2"
metric_version = 2
servers = ["tcp(127.0.0.1:3306)/"]
```
2. Upgrade all affected Telegraf clients to version >=1.6.
New measurements will be created with the `name_suffix`, for example:
* `mysql_v2`
* `mysql_variables_v2`
3. Update charts, alerts, and other supporting code to the new format.
4. You can now remove the old `mysql` plugin configuration and remove old
measurements.
If you wish to remove the `name_suffix`, you may use Kapacitor to copy the
historical data to the default name. Do this only after retiring the old
measurement name.
1. Use the technique described above to write to multiple locations:
```toml
[[inputs.mysql]]
servers = ["tcp(127.0.0.1:3306)/"]
metric_version = 2
[[inputs.mysql]]
name_suffix = "_v2"
metric_version = 2
servers = ["tcp(127.0.0.1:3306)/"]
```
2. Create a TICKScript to copy the historical data:
```sql
dbrp "telegraf"."autogen"
batch
|query('''
SELECT * FROM "telegraf"."autogen"."mysql_v2"
''')
.period(5m)
.every(5m)
|influxDBOut()
.database('telegraf')
.retentionPolicy('autogen')
.measurement('mysql')
```
3. Define a task for your script:
```sh
kapacitor define copy-measurement -tick copy-measurement.task
```
4. Run the task over the data you would like to migrate:
```sh
kapacitor replay-live batch -start 2018-03-30T20:00:00Z -stop 2018-04-01T12:00:00Z -rec-time -task copy-measurement
```
5. Verify copied data and repeat for other measurements.
## Metrics
* Global statuses - all numeric and boolean values of `SHOW GLOBAL STATUS`
* Global variables - all numeric and boolean values of `SHOW GLOBAL VARIABLES`
* Slave status - metrics from `SHOW SLAVE STATUS`. These metrics are gathered
when single-source replication is on. With multi-source replication this
metric does not work unless you set `gather_all_slave_channels = true`. For
MariaDB, `mariadb_dialect = true` should be set to account for differences in
field names and commands. If `gather_replica_status` is enabled, metrics are
gathered from `SHOW REPLICA STATUS` (for MariaDB, `SHOW ALL REPLICAS STATUS`)
* slave_[column name]
* Binary logs - all metrics including size and count of all binary files.
Requires `gather_binary_logs` to be enabled in the configuration.
* binary_size_bytes(int, number)
* binary_files_count(int, number)
* Process list - connection metrics from processlist for each user. It has the
following fields
* connections(int, number)
* User Statistics - connection metrics from user statistics for each user.
It has the following fields
* access_denied
* binlog_bytes_written
* busy_time
* bytes_received
* bytes_sent
* commit_transactions
* concurrent_connections
* connected_time
* cpu_time
* denied_connections
* empty_queries
* hostlost_connections
* other_commands
* rollback_transactions
* rows_fetched
* rows_updated
* select_commands
* server
* table_rows_read
* total_connections
* total_ssl_connections
* update_commands
* user
* Perf Table IO waits - total count and time of I/O wait events for each table
and process. It has the following fields:
* table_io_waits_total_fetch(float, number)
* table_io_waits_total_insert(float, number)
* table_io_waits_total_update(float, number)
* table_io_waits_total_delete(float, number)
* table_io_waits_seconds_total_fetch(float, milliseconds)
* table_io_waits_seconds_total_insert(float, milliseconds)
* table_io_waits_seconds_total_update(float, milliseconds)
* table_io_waits_seconds_total_delete(float, milliseconds)
* Perf index IO waits - total count and time of I/O wait events for each index
and process. It has the following fields:
* index_io_waits_total_fetch(float, number)
* index_io_waits_seconds_total_fetch(float, milliseconds)
* index_io_waits_total_insert(float, number)
* index_io_waits_total_update(float, number)
* index_io_waits_total_delete(float, number)
* index_io_waits_seconds_total_insert(float, milliseconds)
* index_io_waits_seconds_total_update(float, milliseconds)
* index_io_waits_seconds_total_delete(float, milliseconds)
* Info schema autoincrement statuses - auto-increment columns and their max
values. It has the following fields:
* auto_increment_column(int, number)
* auto_increment_column_max(int, number)
* InnoDB metrics - all metrics of information_schema.INNODB_METRICS with
status "enabled". For MariaDB, set `mariadb_dialect = true` to use `ENABLED=1`.
* Perf table lock waits - gathers total number and time of SQL and external
lock wait events for each table and operation. It has the following fields.
The unit of each field varies by its tags.
* read_normal(float, number/milliseconds)
* read_with_shared_locks(float, number/milliseconds)
* read_high_priority(float, number/milliseconds)
* read_no_insert(float, number/milliseconds)
* write_normal(float, number/milliseconds)
* write_allow_write(float, number/milliseconds)
* write_concurrent_insert(float, number/milliseconds)
* write_low_priority(float, number/milliseconds)
* read(float, number/milliseconds)
* write(float, number/milliseconds)
* Perf events waits - gathers total time and number of event waits
* events_waits_total(float, number)
* events_waits_seconds_total(float, milliseconds)
* Perf file events statuses - gathers file events statuses
* file_events_total(float, number)
* file_events_seconds_total(float, milliseconds)
* file_events_bytes_total(float, bytes)
* Perf events statements - gathers attributes of each event
* events_statements_total(float, number)
* events_statements_seconds_total(float, milliseconds)
* events_statements_errors_total(float, number)
* events_statements_warnings_total(float, number)
* events_statements_rows_affected_total(float, number)
* events_statements_rows_sent_total(float, number)
* events_statements_rows_examined_total(float, number)
* events_statements_tmp_tables_total(float, number)
* events_statements_tmp_disk_tables_total(float, number)
* events_statements_sort_merge_passes_totals(float, number)
* events_statements_sort_rows_total(float, number)
* events_statements_no_index_used_total(float, number)
* Table schema - gathers statistics per schema. It has the following
measurements
* info_schema_table_rows(float, number)
* info_schema_table_size_data_length(float, number)
* info_schema_table_size_index_length(float, number)
* info_schema_table_size_data_free(float, number)
* info_schema_table_version(float, number)
## Tags
* All measurements have the following tags
* server (the host name from which the metrics are gathered)
* Process list measurement has the following tags
* user (username for whom the metrics are gathered)
* User Statistics measurement has the following tags
* user (username for whom the metrics are gathered)
* Perf table IO waits measurement has the following tags
* schema
* name (object name for event or process)
* Perf index IO waits has the following tags
* schema
* name
* index
* Info schema autoincrement statuses has the following tags
* schema
* table
* column
* Perf table lock waits has the following tags
* schema
* table
* sql_lock_waits_total (fields with this tag have a numeric unit)
* external_lock_waits_total (fields with this tag have a numeric unit)
* sql_lock_waits_seconds_total (fields with this tag have a millisecond unit)
* external_lock_waits_seconds_total (fields with this tag have a millisecond
unit)
* Perf events waits has the following tags
* event_name
* Perf file events statuses has the following tags
* event_name
* mode
* Perf events statements has the following tags
* schema
* digest
* digest_text
* Table schema has the following tags
* schema
* table
* component
* type
* engine
* row_format
* create_options
## Example Output


@@ -0,0 +1,42 @@
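# Development stack: MySQL, MariaDB (with user statistics enabled) and Percona
# servers, plus a Telegraf container running with ./telegraf.conf.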
version: '3'
services:
  mysql:
    image: mysql:5.7
    restart: always
    environment:
      MYSQL_ROOT_PASSWORD: telegraf
      MYSQL_DATABASE: telegraf
      MYSQL_USER: telegraf
      MYSQL_PASSWORD: telegraf
  maria:
    image: mariadb
    restart: always
    environment:
      MYSQL_ROOT_PASSWORD: telegraf
      MYSQL_DATABASE: telegraf
      MYSQL_USER: telegraf
      MYSQL_PASSWORD: telegraf
    command: mysqld --userstat=1
  percona:
    image: percona
    restart: always
    environment:
      MYSQL_ROOT_PASSWORD: telegraf
      MYSQL_DATABASE: telegraf
      MYSQL_USER: telegraf
      MYSQL_PASSWORD: telegraf
  telegraf:
    image: glinton/scratch
    depends_on:
      - mysql
      - maria
      - percona
    volumes:
      - ./telegraf.conf:/telegraf.conf
      - ../../../../telegraf:/telegraf
    entrypoint:
      - /telegraf
      - --config
      - /telegraf.conf


@@ -0,0 +1,61 @@
# Uncomment each input as needed to test plugin
## mysql
#[[inputs.mysql]]
# servers = ["root:telegraf@tcp(mysql:3306)/"]
# gather_table_schema = true
# gather_process_list = true
# gather_user_statistics = true
# gather_info_schema_auto_inc = true
# gather_innodb_metrics = true
# gather_slave_status = true
# gather_binary_logs = false
# gather_table_io_waits = true
# gather_table_lock_waits = true
# gather_index_io_waits = true
# gather_event_waits = true
# gather_file_events_stats = true
# gather_perf_events_statements = true
# interval_slow = "30m"
# table_schema_databases = []
#
## mariadb
#[[inputs.mysql]]
# servers = ["root:telegraf@tcp(maria:3306)/"]
# gather_table_schema = true
# gather_process_list = true
# gather_user_statistics = true
# gather_info_schema_auto_inc = true
# gather_innodb_metrics = true
# gather_slave_status = true
# gather_binary_logs = false
# gather_table_io_waits = true
# gather_table_lock_waits = true
# gather_index_io_waits = true
# gather_event_waits = true
# gather_file_events_stats = true
# gather_perf_events_statements = true
# interval_slow = "30m"
# table_schema_databases = []
# percona
[[inputs.mysql]]
servers = ["root:telegraf@tcp(percona:3306)/"]
gather_table_schema = true
gather_process_list = true
gather_user_statistics = true
gather_info_schema_auto_inc = true
gather_innodb_metrics = true
gather_slave_status = true
gather_binary_logs = false
gather_table_io_waits = true
gather_table_lock_waits = true
gather_index_io_waits = true
gather_event_waits = true
gather_file_events_stats = true
gather_perf_events_statements = true
interval_slow = "30m"
table_schema_databases = []
[[outputs.file]]
files = ["stdout"]

File diff suppressed because it is too large


@@ -0,0 +1,576 @@
package mysql
import (
"fmt"
"regexp"
"strings"
"testing"
"time"
"github.com/DATA-DOG/go-sqlmock"
"github.com/docker/go-connections/nat"
"github.com/stretchr/testify/require"
"github.com/testcontainers/testcontainers-go/wait"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/testutil"
)
const servicePort = "3306"
func TestMysqlDefaultsToLocalIntegration(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
container := testutil.Container{
Image: "mysql",
Env: map[string]string{
"MYSQL_ALLOW_EMPTY_PASSWORD": "yes",
},
ExposedPorts: []string{servicePort},
WaitingFor: wait.ForAll(
wait.ForLog("/usr/sbin/mysqld: ready for connections").WithOccurrence(2),
wait.ForListeningPort(nat.Port(servicePort)),
),
}
require.NoError(t, container.Start(), "failed to start container")
defer container.Terminate()
dsn := fmt.Sprintf("root@tcp(%s:%s)/", container.Address, container.Ports[servicePort])
s := config.NewSecret([]byte(dsn))
defer s.Destroy()
m := &Mysql{
Servers: []*config.Secret{&s},
Log: &testutil.Logger{},
}
require.NoError(t, m.Init())
var acc testutil.Accumulator
require.NoError(t, m.Gather(&acc))
require.Empty(t, acc.Errors)
require.True(t, acc.HasMeasurement("mysql"))
}
func TestMysqlMultipleInstancesIntegration(t *testing.T) {
// Invoke Gather() from two separate configurations and
// confirm they don't interfere with each other
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
container := testutil.Container{
Image: "mysql",
Env: map[string]string{
"MYSQL_ALLOW_EMPTY_PASSWORD": "yes",
},
ExposedPorts: []string{servicePort},
WaitingFor: wait.ForAll(
wait.ForLog("/usr/sbin/mysqld: ready for connections").WithOccurrence(2),
wait.ForListeningPort(nat.Port(servicePort)),
),
}
require.NoError(t, container.Start(), "failed to start container")
defer container.Terminate()
dsn := fmt.Sprintf("root@tcp(%s:%s)/?tls=false", container.Address, container.Ports[servicePort])
s := config.NewSecret([]byte(dsn))
defer s.Destroy()
m := &Mysql{
Servers: []*config.Secret{&s},
IntervalSlow: config.Duration(30 * time.Second),
GatherGlobalVars: true,
MetricVersion: 2,
Log: &testutil.Logger{},
}
require.NoError(t, m.Init())
var acc testutil.Accumulator
require.NoError(t, m.Gather(&acc))
require.Empty(t, acc.Errors)
require.True(t, acc.HasMeasurement("mysql"))
// acc should have global variables
require.True(t, acc.HasMeasurement("mysql_variables"))
s2 := config.NewSecret([]byte(dsn))
m2 := &Mysql{
Servers: []*config.Secret{&s2},
MetricVersion: 2,
Log: &testutil.Logger{},
}
require.NoError(t, m2.Init())
var acc2 testutil.Accumulator
require.NoError(t, m2.Gather(&acc2))
require.Empty(t, acc2.Errors)
require.True(t, acc2.HasMeasurement("mysql"))
// acc2 should not have global variables
require.False(t, acc2.HasMeasurement("mysql_variables"))
}
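// Percona Server ships the user statistics plugin; starting the container with
// --userstat=ON allows GatherUserStatistics to collect per-user fields.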
func TestPercona8Integration(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
container := testutil.Container{
Image: "percona:8",
Env: map[string]string{
"MYSQL_ROOT_PASSWORD": "secret",
},
Cmd: []string{"--userstat=ON"},
ExposedPorts: []string{servicePort},
WaitingFor: wait.ForAll(
wait.ForLog("/usr/sbin/mysqld: ready for connections").WithOccurrence(2),
wait.ForListeningPort(nat.Port(servicePort)),
),
}
require.NoError(t, container.Start(), "failed to start container")
defer container.Terminate()
dsn := fmt.Sprintf("root:secret@tcp(%s:%s)/", container.Address, container.Ports[servicePort])
s := config.NewSecret([]byte(dsn))
defer s.Destroy()
plugin := &Mysql{
Servers: []*config.Secret{&s},
GatherUserStatistics: true,
Log: &testutil.Logger{},
}
require.NoError(t, plugin.Init())
var acc testutil.Accumulator
require.NoError(t, plugin.Gather(&acc))
require.Empty(t, acc.Errors)
require.True(t, acc.HasMeasurement("mysql_user_stats"))
require.True(t, acc.HasFloatField("mysql_user_stats", "connected_time"))
require.True(t, acc.HasFloatField("mysql_user_stats", "cpu_time"))
require.True(t, acc.HasFloatField("mysql_user_stats", "busy_time"))
}
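// A single-node Galera cluster exposes wsrep_* status variables; wsrep_ready
// is reported as ON and should be converted to the integer 1.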
func TestGaleraIntegration(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
container := testutil.Container{
Image: "bitnami/mariadb-galera",
Env: map[string]string{"ALLOW_EMPTY_PASSWORD": "yes"},
ExposedPorts: []string{servicePort},
WaitingFor: wait.ForAll(
wait.ForLog("Synchronized with group, ready for connections"),
wait.ForListeningPort(nat.Port(servicePort)),
),
}
require.NoError(t, container.Start(), "failed to start container")
defer container.Terminate()
dsn := fmt.Sprintf("root@tcp(%s:%s)/", container.Address, container.Ports[servicePort])
s := config.NewSecret([]byte(dsn))
defer s.Destroy()
plugin := &Mysql{
Servers: []*config.Secret{&s},
Log: &testutil.Logger{},
}
require.NoError(t, plugin.Init())
var acc testutil.Accumulator
require.NoError(t, plugin.Gather(&acc))
require.Empty(t, acc.Errors)
require.True(t, acc.HasIntField("mysql", "wsrep_ready"))
for _, m := range acc.GetTelegrafMetrics() {
if v, found := m.GetField("wsrep_ready"); found {
require.EqualValues(t, 1, v, "invalid value for field wsrep_ready")
break
}
}
}
func TestMysqlGetDSNTag(t *testing.T) {
tests := []struct {
input string
output string
}{
{
"",
"127.0.0.1:3306",
},
{
"localhost",
"127.0.0.1:3306",
},
{
"127.0.0.1",
"127.0.0.1:3306",
},
{
"tcp(192.168.1.1:3306)/",
"192.168.1.1:3306",
},
{
"tcp(localhost)/",
"localhost:3306",
},
{
"root:passwd@tcp(192.168.1.1:3306)/?tls=false",
"192.168.1.1:3306",
},
{
"root@tcp(127.0.0.1:3306)/?tls=false",
"127.0.0.1:3306",
},
{
"root:passwd@tcp(localhost:3036)/dbname?allowOldPasswords=1",
"localhost:3036",
},
{
"root:foo@bar@tcp(192.1.1.1:3306)/?tls=false",
"192.1.1.1:3306",
},
{
"root:f00@b4r@tcp(192.1.1.1:3306)/?tls=false",
"192.1.1.1:3306",
},
{
"root:fl!p11@tcp(192.1.1.1:3306)/?tls=false",
"192.1.1.1:3306",
},
}
for _, test := range tests {
output := getDSNTag(test.input)
if output != test.output {
t.Errorf("Input: %s Expected %s, got %s\n", test.input, test.output, output)
}
}
}
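// Init should append a default 5s dial timeout to each DSN that does not
// already specify one, while leaving credentials and existing parameters
// intact.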
func TestMysqlDNSAddTimeout(t *testing.T) {
tests := []struct {
name string
input string
output string
}{
{
"empty",
"",
"tcp(127.0.0.1:3306)/?timeout=5s",
},
{
"no timeout",
"tcp(192.168.1.1:3306)/",
"tcp(192.168.1.1:3306)/?timeout=5s",
},
{
"no timeout with credentials",
"root:passwd@tcp(192.168.1.1:3306)/?tls=false",
"root:passwd@tcp(192.168.1.1:3306)/?timeout=5s&tls=false",
},
{
"with timeout and credentials",
"root:passwd@tcp(192.168.1.1:3306)/?tls=false&timeout=10s",
"root:passwd@tcp(192.168.1.1:3306)/?timeout=10s&tls=false",
},
{
"no timeout different IP",
"tcp(10.150.1.123:3306)/",
"tcp(10.150.1.123:3306)/?timeout=5s",
},
{
"no timeout with bracket credentials",
"root:@!~(*&$#%(&@#(@&#Password@tcp(10.150.1.123:3306)/",
"root:@!~(*&$#%(&@#(@&#Password@tcp(10.150.1.123:3306)/?timeout=5s",
},
{
"no timeout with strange credentials",
"root:Test3a#@!@tcp(10.150.1.123:3306)/",
"root:Test3a#@!@tcp(10.150.1.123:3306)/?timeout=5s",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := config.NewSecret([]byte(tt.input))
m := &Mysql{
Servers: []*config.Secret{&s},
}
require.NoError(t, m.Init())
require.Len(t, m.Servers, 1)
dsn, err := m.Servers[0].Get()
require.NoError(t, err)
defer dsn.Destroy()
require.Equal(t, tt.output, dsn.TemporaryString())
})
}
}
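// A tls=custom DSN parameter must be rewritten to reference the TLS config
// registered by Init under a unique generated name (custom-<id>); recognized
// non-custom values (true, false, skip-verify, preferred) are kept, and
// unknown config names are rejected.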
func TestMysqlTLSCustomization(t *testing.T) {
tests := []struct {
name string
input string
expected string
errmsg string
}{
{
name: "custom only param",
input: "root:passwd@tcp(192.168.1.1:3306)/?tls=custom",
expected: "root:passwd@tcp(192.168.1.1:3306)/?timeout=5s&tls=custom-<id>",
},
{
name: "custom start param",
input: "root:passwd@tcp(192.168.1.1:3306)/?tls=custom&timeout=20s",
expected: "root:passwd@tcp(192.168.1.1:3306)/?timeout=20s&tls=custom-<id>",
},
{
name: "custom end param",
input: "root:passwd@tcp(192.168.1.1:3306)/?timeout=20s&tls=custom",
expected: "root:passwd@tcp(192.168.1.1:3306)/?timeout=20s&tls=custom-<id>",
},
{
name: "custom middle param",
input: "root:passwd@tcp(192.168.1.1:3306)/?timeout=20s&tls=custom&foo=bar",
expected: "root:passwd@tcp(192.168.1.1:3306)/?timeout=20s&tls=custom-<id>&foo=bar",
},
{
name: "non-custom param false",
input: "root:passwd@tcp(192.168.1.1:3306)/?tls=false",
expected: "root:passwd@tcp(192.168.1.1:3306)/?timeout=5s&tls=false",
},
{
name: "non-custom param true",
input: "root:passwd@tcp(192.168.1.1:3306)/?tls=true",
expected: "root:passwd@tcp(192.168.1.1:3306)/?timeout=5s&tls=true",
},
{
name: "non-custom param skip-verify",
input: "root:passwd@tcp(192.168.1.1:3306)/?tls=skip-verify",
expected: "root:passwd@tcp(192.168.1.1:3306)/?timeout=5s&tls=skip-verify",
},
{
name: "non-custom param preferred",
input: "root:passwd@tcp(192.168.1.1:3306)/?tls=preferred",
expected: "root:passwd@tcp(192.168.1.1:3306)/?allowFallbackToPlaintext=true&timeout=5s&tls=preferred",
},
{
name: "non-custom param customcfg",
input: "root:passwd@tcp(192.168.1.1:3306)/?tls=customcfg",
expected: "root:passwd@tcp(192.168.1.1:3306)/?tls=customcfg",
errmsg: "invalid value / unknown config name: customcfg",
},
{
name: "non-custom param custom-cfg",
input: "root:passwd@tcp(192.168.1.1:3306)/?tls=custom-cfg",
expected: "root:passwd@tcp(192.168.1.1:3306)/?tls=custom-cfg",
errmsg: "invalid value / unknown config name: custom-cfg",
},
{
name: "non-custom param custom-cfg and following",
input: "root:passwd@tcp(192.168.1.1:3306)/?tls=custom-cfg&timeout=20s",
expected: "root:passwd@tcp(192.168.1.1:3306)/?tls=custom-cfg&timeout=20s",
errmsg: "invalid value / unknown config name: custom-cfg",
},
{
name: "non-custom param notls keyword",
input: "root:passwd@tcp(192.168.1.1:3306)/?timeout=20s&notls=custom",
expected: "root:passwd@tcp(192.168.1.1:3306)/?timeout=20s&notls=custom",
},
}
customIDRe := regexp.MustCompile(`[\?&]tls=custom-([\w-]*)(?:$|&)`)
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
s := config.NewSecret([]byte(test.input))
plugin := &Mysql{
Servers: []*config.Secret{&s},
ClientConfig: tls.ClientConfig{InsecureSkipVerify: true},
}
err := plugin.Init()
if test.errmsg != "" {
require.ErrorContains(t, err, test.errmsg)
} else {
require.NoError(t, err)
}
require.Len(t, plugin.Servers, 1)
rs, err := plugin.Servers[0].Get()
require.NoError(t, err)
defer rs.Destroy()
// Replace the `<id>` part with a potential actual ID
actual := rs.String()
expected := test.expected
if strings.Contains(expected, "<id>") {
matches := customIDRe.FindStringSubmatch(actual)
if len(matches) == 2 {
expected = strings.Replace(expected, "<id>", matches[1], 1)
}
}
require.Equal(t, expected, actual)
})
}
}
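// Run gatherGlobalVariables against a mocked SHOW GLOBAL VARIABLES result set
// and verify the per-variable type conversions used by metric_version = 2.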
func TestGatherGlobalVariables(t *testing.T) {
db, mock, err := sqlmock.New()
require.NoError(t, err)
defer db.Close()
m := Mysql{
Log: testutil.Logger{},
MetricVersion: 2,
}
require.NoError(t, m.Init())
columns := []string{"Variable_name", "Value"}
measurement := "mysql_variables"
type fields []struct {
key string
rawValue string
parsedValue interface{}
}
type tags map[string]string
testCases := []struct {
name string
fields fields
tags tags
}{
{
"basic variables",
fields{
{"__test__string_variable", "text", "text"},
{"__test__int_variable", "5", int64(5)},
{"__test__off_variable", "OFF", int64(0)},
{"__test__on_variable", "ON", int64(1)},
{"__test__empty_variable", "", nil},
},
tags{"server": "127.0.0.1:3306"},
},
{
"version tag is present",
fields{
{"__test__string_variable", "text", "text"},
{"version", "8.0.27-0ubuntu0.20.04.1", "8.0.27-0ubuntu0.20.04.1"},
},
tags{"server": "127.0.0.1:3306", "version": "8.0.27-0ubuntu0.20.04.1"},
},
{"", fields{{"delay_key_write", "OFF", "OFF"}}, nil},
{"", fields{{"delay_key_write", "ON", "ON"}}, nil},
{"", fields{{"delay_key_write", "ALL", "ALL"}}, nil},
{"", fields{{"enforce_gtid_consistency", "OFF", "OFF"}}, nil},
{"", fields{{"enforce_gtid_consistency", "ON", "ON"}}, nil},
{"", fields{{"enforce_gtid_consistency", "WARN", "WARN"}}, nil},
{"", fields{{"event_scheduler", "NO", "NO"}}, nil},
{"", fields{{"event_scheduler", "YES", "YES"}}, nil},
{"", fields{{"event_scheduler", "DISABLED", "DISABLED"}}, nil},
{"", fields{{"have_ssl", "DISABLED", int64(0)}}, nil},
{"", fields{{"have_ssl", "YES", int64(1)}}, nil},
{"", fields{{"have_symlink", "NO", int64(0)}}, nil},
{"", fields{{"have_symlink", "DISABLED", int64(0)}}, nil},
{"", fields{{"have_symlink", "YES", int64(1)}}, nil},
{"", fields{{"session_track_gtids", "OFF", "OFF"}}, nil},
{"", fields{{"session_track_gtids", "OWN_GTID", "OWN_GTID"}}, nil},
{"", fields{{"session_track_gtids", "ALL_GTIDS", "ALL_GTIDS"}}, nil},
{"", fields{{"session_track_transaction_info", "OFF", "OFF"}}, nil},
{"", fields{{"session_track_transaction_info", "STATE", "STATE"}}, nil},
{"", fields{{"session_track_transaction_info", "CHARACTERISTICS", "CHARACTERISTICS"}}, nil},
{"", fields{{"ssl_fips_mode", "0", "0"}}, nil}, // TODO: map this to OFF or vice versa using integers
{"", fields{{"ssl_fips_mode", "1", "1"}}, nil}, // TODO: map this to ON or vice versa using integers
{"", fields{{"ssl_fips_mode", "2", "2"}}, nil}, // TODO: map this to STRICT or vice versa using integers
{"", fields{{"ssl_fips_mode", "OFF", "OFF"}}, nil},
{"", fields{{"ssl_fips_mode", "ON", "ON"}}, nil},
{"", fields{{"ssl_fips_mode", "STRICT", "STRICT"}}, nil},
{"", fields{{"use_secondary_engine", "OFF", "OFF"}}, nil},
{"", fields{{"use_secondary_engine", "ON", "ON"}}, nil},
{"", fields{{"use_secondary_engine", "FORCED", "FORCED"}}, nil},
{"", fields{{"transaction_write_set_extraction", "OFF", "OFF"}}, nil},
{"", fields{{"transaction_write_set_extraction", "MURMUR32", "MURMUR32"}}, nil},
{"", fields{{"transaction_write_set_extraction", "XXHASH64", "XXHASH64"}}, nil},
{"", fields{{"slave_skip_errors", "OFF", "OFF"}}, nil},
{"", fields{{"slave_skip_errors", "0", "0"}}, nil},
{"", fields{{"slave_skip_errors", "1007,1008,1050", "1007,1008,1050"}}, nil},
{"", fields{{"slave_skip_errors", "all", "all"}}, nil},
{"", fields{{"slave_skip_errors", "ddl_exist_errors", "ddl_exist_errors"}}, nil},
{"", fields{{"gtid_mode", "OFF", int64(0)}}, nil},
{"", fields{{"gtid_mode", "OFF_PERMISSIVE", int64(0)}}, nil},
{"", fields{{"gtid_mode", "ON", int64(1)}}, nil},
{"", fields{{"gtid_mode", "ON_PERMISSIVE", int64(1)}}, nil},
}
for i, testCase := range testCases {
if testCase.name == "" {
testCase.name = fmt.Sprintf("#%d", i)
}
t.Run(testCase.name, func(t *testing.T) {
rows := sqlmock.NewRows(columns)
for _, field := range testCase.fields {
rows.AddRow(field.key, field.rawValue)
}
mock.ExpectQuery(globalVariablesQuery).WillReturnRows(rows).RowsWillBeClosed()
acc := &testutil.Accumulator{}
err := m.gatherGlobalVariables(db, getDSNTag("test"), acc)
require.NoErrorf(t, err, "err on gatherGlobalVariables (test case %q)", testCase.name)
foundFields := map[string]bool{}
for _, metric := range acc.Metrics {
require.Equalf(t, measurement, metric.Measurement, "wrong measurement (test case %q)", testCase.name)
if testCase.tags != nil {
require.Equalf(t, testCase.tags, tags(metric.Tags), "wrong tags (test case %q)", testCase.name)
}
for key, value := range metric.Fields {
for _, field := range testCase.fields {
if field.key == key {
require.Falsef(t, foundFields[key], "field %s observed multiple times (test case %q)", key, testCase.name)
require.Equalf(t, field.parsedValue, value, "wrong value for field %s (test case %q)", key, testCase.name)
foundFields[key] = true
break
}
}
require.Truef(t, foundFields[key], "unexpected field %s=%v (test case %q)", key, value, testCase.name)
}
}
for _, field := range testCase.fields {
require.Truef(t, foundFields[field.key], "missing field %s=%v (test case %q)", field.key, field.parsedValue, testCase.name)
}
})
}
}
func TestNewNamespace(t *testing.T) {
testCases := []struct {
words []string
namespace string
}{
{
[]string{"thread", "info_scheme", "query update"},
"thread_info_scheme_query_update",
},
{
[]string{"thread", "info_scheme", "query_update"},
"thread_info_scheme_query_update",
},
{
[]string{"thread", "info", "scheme", "query", "update"},
"thread_info_scheme_query_update",
},
}
for _, cases := range testCases {
if got := newNamespace(cases.words...); got != cases.namespace {
t.Errorf("want %s, got %s", cases.namespace, got)
}
}
}


@@ -0,0 +1,103 @@
# Read metrics from one or many mysql servers
[[inputs.mysql]]
## specify servers via a url matching:
## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]]
## see https://github.com/go-sql-driver/mysql#dsn-data-source-name
## e.g.
## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"]
## servers = ["user@tcp(127.0.0.1:3306)/?tls=false"]
#
## If no servers are specified, then localhost is used as the host.
servers = ["tcp(127.0.0.1:3306)/"]
## Selects the metric output format.
##
## This option exists to maintain backwards compatibility; if you have
## existing metrics, do not set or change this value until you are ready
## to migrate to the new format.
##
## If you do not have existing metrics from this plugin, set this to the
## latest version.
##
## Telegraf >=1.6: metric_version = 2
## <1.6: metric_version = 1 (or unset)
metric_version = 2
## if the list is empty, then metrics are gathered from all database tables
# table_schema_databases = []
## gather metrics from INFORMATION_SCHEMA.TABLES for databases provided
## in the list above
# gather_table_schema = false
## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST
# gather_process_list = false
## gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS
# gather_user_statistics = false
## gather auto_increment columns and max values from information schema
# gather_info_schema_auto_inc = false
## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS
# gather_innodb_metrics = false
## gather metrics from all channels from SHOW SLAVE STATUS command output
# gather_all_slave_channels = false
## gather metrics from SHOW SLAVE STATUS command output
# gather_slave_status = false
## gather metrics from SHOW REPLICA STATUS command output
# gather_replica_status = false
## use SHOW ALL SLAVES STATUS command output for MariaDB
## use SHOW ALL REPLICAS STATUS command if gather_replica_status is enabled
# mariadb_dialect = false
## gather metrics from SHOW BINARY LOGS command output
# gather_binary_logs = false
## gather metrics from SHOW GLOBAL VARIABLES command output
# gather_global_variables = true
## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE
# gather_table_io_waits = false
## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS
# gather_table_lock_waits = false
## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE
# gather_index_io_waits = false
## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS
# gather_event_waits = false
## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME
# gather_file_events_stats = false
## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST
# gather_perf_events_statements = false
#
## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME
# gather_perf_sum_per_acc_per_event = false
#
## list of events to be gathered for gather_perf_sum_per_acc_per_event
## in case of empty list all events will be gathered
# perf_summary_events = []
## the limits for metrics from perf_events_statements
# perf_events_statements_digest_text_limit = 120
# perf_events_statements_limit = 250
# perf_events_statements_time_limit = 86400
## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)
## example: interval_slow = "30m"
# interval_slow = ""
## Optional TLS Config (used if tls=custom parameter specified in server uri)
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false


@@ -0,0 +1,199 @@
package v1
import (
"bytes"
"database/sql"
"strconv"
)
// Mapping represents a mapping between server and export names.
type Mapping struct {
OnServer string
InExport string
}
// Mappings is a list of predefined mappings between server and export names.
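// Each OnServer entry is matched as a variable-name prefix and replaced with
// the corresponding InExport prefix in the exported field name.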
var Mappings = []*Mapping{
{
OnServer: "Aborted_",
InExport: "aborted_",
},
{
OnServer: "Bytes_",
InExport: "bytes_",
},
{
OnServer: "Com_",
InExport: "commands_",
},
{
OnServer: "Created_",
InExport: "created_",
},
{
OnServer: "Handler_",
InExport: "handler_",
},
{
OnServer: "Innodb_",
InExport: "innodb_",
},
{
OnServer: "Key_",
InExport: "key_",
},
{
OnServer: "Open_",
InExport: "open_",
},
{
OnServer: "Opened_",
InExport: "opened_",
},
{
OnServer: "Qcache_",
InExport: "qcache_",
},
{
OnServer: "Table_",
InExport: "table_",
},
{
OnServer: "Tokudb_",
InExport: "tokudb_",
},
{
OnServer: "Threads_",
InExport: "threads_",
},
{
OnServer: "Access_",
InExport: "access_",
},
{
OnServer: "Aria__",
InExport: "aria_",
},
{
OnServer: "Binlog__",
InExport: "binlog_",
},
{
OnServer: "Busy_",
InExport: "busy_",
},
{
OnServer: "Connection_",
InExport: "connection_",
},
{
OnServer: "Delayed_",
InExport: "delayed_",
},
{
OnServer: "Empty_",
InExport: "empty_",
},
{
OnServer: "Executed_",
InExport: "executed_",
},
{
OnServer: "Feature_",
InExport: "feature_",
},
{
OnServer: "Flush_",
InExport: "flush_",
},
{
OnServer: "Last_",
InExport: "last_",
},
{
OnServer: "Master_",
InExport: "master_",
},
{
OnServer: "Max_",
InExport: "max_",
},
{
OnServer: "Memory_",
InExport: "memory_",
},
{
OnServer: "Not_",
InExport: "not_",
},
{
OnServer: "Performance_",
InExport: "performance_",
},
{
OnServer: "Prepared_",
InExport: "prepared_",
},
{
OnServer: "Rows_",
InExport: "rows_",
},
{
OnServer: "Rpl_",
InExport: "rpl_",
},
{
OnServer: "Select_",
InExport: "select_",
},
{
OnServer: "Slave_",
InExport: "slave_",
},
{
OnServer: "Slow_",
InExport: "slow_",
},
{
OnServer: "Sort_",
InExport: "sort_",
},
{
OnServer: "Subquery_",
InExport: "subquery_",
},
{
OnServer: "Tc_",
InExport: "tc_",
},
{
OnServer: "Threadpool_",
InExport: "threadpool_",
},
{
OnServer: "wsrep_",
InExport: "wsrep_",
},
{
OnServer: "Uptime_",
InExport: "uptime_",
},
}
// ParseValue parses a SQL raw byte value into a float64.
// It converts "Yes"/"ON" to 1, "No"/"OFF" to 0, and attempts to parse other values as float64.
func ParseValue(value sql.RawBytes) (float64, error) {
if bytes.Equal(value, []byte("Yes")) || bytes.Equal(value, []byte("ON")) {
return 1, nil
}
if bytes.Equal(value, []byte("No")) || bytes.Equal(value, []byte("OFF")) {
return 0, nil
}
return strconv.ParseFloat(string(value), 64)
}


@@ -0,0 +1,178 @@
package v2
import (
"bytes"
"database/sql"
"errors"
"fmt"
"strconv"
)
type conversionFunc func(value sql.RawBytes) (interface{}, error)
// ParseInt parses the given sql.RawBytes value into an int64.
// It returns the parsed value and an error if the parsing fails.
func ParseInt(value sql.RawBytes) (interface{}, error) {
v, err := strconv.ParseInt(string(value), 10, 64)
// Ignore ErrRange. When this error is set the returned value is "the
// maximum magnitude integer of the appropriate bitSize and sign."
var numErr *strconv.NumError
if errors.As(err, &numErr) && errors.Is(numErr, strconv.ErrRange) {
return v, nil
}
return v, err
}
// ParseUint parses the given sql.RawBytes value into an uint64.
// It returns the parsed value and an error if the parsing fails.
func ParseUint(value sql.RawBytes) (interface{}, error) {
return strconv.ParseUint(string(value), 10, 64)
}
// ParseFloat parses the given sql.RawBytes value into a float64.
// It returns the parsed value and an error if the parsing fails.
func ParseFloat(value sql.RawBytes) (interface{}, error) {
return strconv.ParseFloat(string(value), 64)
}
// ParseBoolAsInteger parses the given sql.RawBytes value into an int64
// representing a boolean value. It returns 1 for "YES" or "ON" and 0 otherwise.
func ParseBoolAsInteger(value sql.RawBytes) (interface{}, error) {
if bytes.EqualFold(value, []byte("YES")) || bytes.EqualFold(value, []byte("ON")) {
return int64(1), nil
}
return int64(0), nil
}
// ParseString parses the given sql.RawBytes value into a string.
// It returns the parsed value and an error if the parsing fails.
func ParseString(value sql.RawBytes) (interface{}, error) {
return string(value), nil
}
// ParseGTIDMode parses the given sql.RawBytes value into an int64
// representing the GTID mode. It returns an error if the value is unrecognized.
func ParseGTIDMode(value sql.RawBytes) (interface{}, error) {
// https://dev.mysql.com/doc/refman/8.0/en/replication-mode-change-online-concepts.html
v := string(value)
switch v {
case "OFF":
return int64(0), nil
case "ON":
return int64(1), nil
case "OFF_PERMISSIVE":
return int64(0), nil
case "ON_PERMISSIVE":
return int64(1), nil
default:
return nil, fmt.Errorf("unrecognized gtid_mode: %q", v)
}
}
// ParseValue attempts to parse the given sql.RawBytes value into an appropriate type.
// It returns the parsed value and an error if the parsing fails.
func ParseValue(value sql.RawBytes) (interface{}, error) {
if bytes.EqualFold(value, []byte("YES")) || bytes.Equal(value, []byte("ON")) {
return int64(1), nil
}
if bytes.EqualFold(value, []byte("NO")) || bytes.Equal(value, []byte("OFF")) {
return int64(0), nil
}
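// Fall back to numeric parsing, from most to least specific: int64 first,
// then uint64 for values beyond the int64 range, then float64.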
if val, err := strconv.ParseInt(string(value), 10, 64); err == nil {
return val, nil
}
if val, err := strconv.ParseUint(string(value), 10, 64); err == nil {
return val, nil
}
if val, err := strconv.ParseFloat(string(value), 64); err == nil {
return val, nil
}
if len(string(value)) > 0 {
return string(value), nil
}
return nil, fmt.Errorf("unconvertible value: %q", string(value))
}
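// globalStatusConversions overrides the generic ParseValue result for status
// variables that need a specific type (unsigned counters, and Galera ratios
// that must always be floats).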
var globalStatusConversions = map[string]conversionFunc{
"innodb_available_undo_logs": ParseUint,
"innodb_buffer_pool_pages_misc": ParseUint,
"innodb_data_pending_fsyncs": ParseUint,
"ssl_ctx_verify_depth": ParseUint,
"ssl_verify_depth": ParseUint,
// see https://galeracluster.com/library/documentation/galera-status-variables.html
"wsrep_apply_oooe": ParseFloat,
"wsrep_apply_oool": ParseFloat,
"wsrep_apply_window": ParseFloat,
"wsrep_cert_deps_distance": ParseFloat,
"wsrep_cert_interval": ParseFloat,
"wsrep_commit_oooe": ParseFloat,
"wsrep_commit_oool": ParseFloat,
"wsrep_commit_window": ParseFloat,
"wsrep_flow_control_paused": ParseFloat,
"wsrep_local_index": ParseUint,
"wsrep_local_recv_queue_avg": ParseFloat,
"wsrep_local_send_queue_avg": ParseFloat,
}
var globalVariableConversions = map[string]conversionFunc{
// see https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html
// see https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html
"delay_key_write": ParseString, // ON, OFF, ALL
"enforce_gtid_consistency": ParseString, // ON, OFF, WARN
"event_scheduler": ParseString, // YES, NO, DISABLED
"have_openssl": ParseBoolAsInteger, // alias for have_ssl
"have_ssl": ParseBoolAsInteger, // YES, DISABLED
"have_symlink": ParseBoolAsInteger, // YES, NO, DISABLED
"session_track_gtids": ParseString,
"session_track_transaction_info": ParseString,
"ssl_fips_mode": ParseString,
"use_secondary_engine": ParseString,
// https://dev.mysql.com/doc/refman/5.7/en/replication-options-binary-log.html
// https://dev.mysql.com/doc/refman/8.0/en/replication-options-binary-log.html
"transaction_write_set_extraction": ParseString,
// https://dev.mysql.com/doc/refman/5.7/en/replication-options-replica.html
// https://dev.mysql.com/doc/refman/8.0/en/replication-options-replica.html
"slave_skip_errors": ParseString,
// https://dev.mysql.com/doc/refman/5.7/en/replication-options-gtids.html
// https://dev.mysql.com/doc/refman/8.0/en/replication-options-gtids.html
"gtid_mode": ParseGTIDMode,
}
// ConvertGlobalStatus converts the given key and sql.RawBytes value into an appropriate type based on globalStatusConversions.
// It returns the converted value and an error if the conversion fails.
func ConvertGlobalStatus(key string, value sql.RawBytes) (interface{}, error) {
if bytes.Equal(value, []byte("")) {
return nil, nil
}
if conv, ok := globalStatusConversions[key]; ok {
return conv(value)
}
return ParseValue(value)
}
// ConvertGlobalVariables converts the given key and sql.RawBytes value into an appropriate type based on globalVariableConversions.
// It returns the converted value and an error if the conversion fails.
func ConvertGlobalVariables(key string, value sql.RawBytes) (interface{}, error) {
if bytes.Equal(value, []byte("")) {
return nil, nil
}
if conv, ok := globalVariableConversions[key]; ok {
return conv(value)
}
return ParseValue(value)
}


@@ -0,0 +1,127 @@
package v2
import (
"database/sql"
"strings"
"testing"
"github.com/stretchr/testify/require"
)
func TestConvertGlobalStatus(t *testing.T) {
tests := []struct {
name string
key string
value sql.RawBytes
expected interface{}
expectedErr error
}{
{
name: "default",
key: "ssl_ctx_verify_depth",
value: []byte("0"),
expected: uint64(0),
expectedErr: nil,
},
{
name: "overflow int64",
key: "ssl_ctx_verify_depth",
value: []byte("18446744073709551615"),
expected: uint64(18446744073709551615),
expectedErr: nil,
},
{
name: "defined variable but unset",
key: "ssl_ctx_verify_depth",
value: []byte(""),
expected: nil,
expectedErr: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
actual, err := ConvertGlobalStatus(tt.key, tt.value)
require.Equal(t, tt.expectedErr, err)
require.Equal(t, tt.expected, actual)
})
}
}
func TestConvertGlobalVariables(t *testing.T) {
tests := []struct {
name string
key string
value sql.RawBytes
expected interface{}
expectedErr error
}{
{
name: "boolean type mysql<=5.6",
key: "gtid_mode",
value: []byte("ON"),
expected: int64(1),
expectedErr: nil,
},
{
name: "enum type mysql>=5.7",
key: "gtid_mode",
value: []byte("ON_PERMISSIVE"),
expected: int64(1),
expectedErr: nil,
},
{
name: "defined variable but unset",
key: "ssl_ctx_verify_depth",
value: []byte(""),
expected: nil,
expectedErr: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
actual, err := ConvertGlobalVariables(tt.key, tt.value)
require.Equal(t, tt.expectedErr, err)
require.Equal(t, tt.expected, actual)
})
}
}
func TestParseValue(t *testing.T) {
testCases := []struct {
rawByte sql.RawBytes
output interface{}
err string
}{
{sql.RawBytes("123"), int64(123), ""},
{sql.RawBytes("abc"), "abc", ""},
{sql.RawBytes("10.1"), 10.1, ""},
{sql.RawBytes("ON"), int64(1), ""},
{sql.RawBytes("OFF"), int64(0), ""},
{sql.RawBytes("NO"), int64(0), ""},
{sql.RawBytes("YES"), int64(1), ""},
{sql.RawBytes("No"), int64(0), ""},
{sql.RawBytes("Yes"), int64(1), ""},
{sql.RawBytes("-794"), int64(-794), ""},
{sql.RawBytes("2147483647"), int64(2147483647), ""}, // max int32
{sql.RawBytes("2147483648"), int64(2147483648), ""}, // too big for int32
{sql.RawBytes("9223372036854775807"), int64(9223372036854775807), ""}, // max int64
{sql.RawBytes("9223372036854775808"), uint64(9223372036854775808), ""}, // too big for int64
{sql.RawBytes("18446744073709551615"), uint64(18446744073709551615), ""}, // max uint64
{sql.RawBytes("18446744073709551616"), float64(18446744073709552000), ""}, // too big for uint64
{sql.RawBytes("18446744073709552333"), float64(18446744073709552000), ""}, // too big for uint64
{sql.RawBytes(""), nil, "unconvertible value"},
}
for _, cases := range testCases {
got, err := ParseValue(cases.rawByte)
if err != nil && cases.err == "" {
t.Errorf("for %q got unexpected error: %q", string(cases.rawByte), err.Error())
} else if err != nil && !strings.HasPrefix(err.Error(), cases.err) {
t.Errorf("for %q wanted error %q, got %q", string(cases.rawByte), cases.err, err.Error())
} else if err == nil && cases.err != "" {
t.Errorf("for %q did not get expected error: %s", string(cases.rawByte), cases.err)
} else if got != cases.output {
t.Errorf("for %q wanted %#v (%T), got %#v (%T)", string(cases.rawByte), cases.output, cases.output, got, got)
}
}
}