Adding upstream version 1.34.4.
Signed-off-by: Daniel Baumann <daniel@debian.org>
parent e393c3af3f
commit 4978089aab
4963 changed files with 677545 additions and 0 deletions
plugins/outputs/postgresql/Dockerfile (new file, 26 lines)
@@ -0,0 +1,26 @@
# This Dockerfile can be used to build an image including the pguint extension.
#
#   docker build -t postgres:pguint .
#   docker run -d --name postgres -p 5432:5432 -e POSTGRES_HOST_AUTH_METHOD=trust postgres:pguint
#   docker logs -f postgres 2>&1 | grep -q 'listening on IPv4 address "0.0.0.0", port 5432'
#   go test

# Tag from https://hub.docker.com/_/postgres?tab=tags
ARG POSTGRES_TAG=latest

ARG PGUINT_REPO
ARG PGUINT_REF

FROM postgres:${POSTGRES_TAG}

# PG_MAJOR and PG_VERSION are provided by the postgres base image.
RUN apt-get update && apt-get install -y build-essential curl postgresql-server-dev-${PG_MAJOR}=${PG_VERSION}

ENV PGUINT_REPO=${PGUINT_REPO:-phemmer/pguint}
ENV PGUINT_REF=${PGUINT_REF:-fix-getmsgint64}
RUN mkdir /pguint && cd /pguint && \
    curl -L https://github.com/${PGUINT_REPO}/tarball/${PGUINT_REF} | tar -zx --strip-components=1 && \
    make && make install && \
    echo 'CREATE EXTENSION uint;' > /docker-entrypoint-initdb.d/uint.sql && \
    echo '\\c template1' >> /docker-entrypoint-initdb.d/uint.sql && \
    echo 'CREATE EXTENSION uint;' >> /docker-entrypoint-initdb.d/uint.sql

plugins/outputs/postgresql/README.md (new file, 332 lines)
@@ -0,0 +1,332 @@
# PostgreSQL Output Plugin

This plugin writes metrics to a [PostgreSQL][postgresql] (or compatible)
server, managing the schema and automatically updating missing columns.

⭐ Telegraf v1.24.0
🏷️ datastore
💻 all

[postgresql]: https://www.postgresql.org/

## Global configuration options <!-- @/docs/includes/plugin_config.md -->

In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used to
modify metrics, tags, and fields or create aliases and configure ordering, etc.
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.

[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins

## Startup error behavior options <!-- @/docs/includes/startup_error_behavior.md -->

In addition to the plugin-specific and global configuration settings the plugin
supports options for specifying the behavior when experiencing startup errors
using the `startup_error_behavior` setting. Available values are:

- `error`: Telegraf will stop and exit in case of startup errors. This is the
  default behavior.
- `ignore`: Telegraf will ignore startup errors for this plugin and disable it,
  but continue processing all other plugins.
- `retry`: Telegraf will try to start up the plugin on every gather or write
  cycle in case of startup errors. The plugin is disabled until the startup
  succeeds.
- `probe`: Telegraf will probe the plugin's function (if possible) and disable
  the plugin in case probing fails. If the plugin does not support probing,
  Telegraf will behave as if `ignore` was set instead.
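
For example, a minimal sketch that keeps Telegraf running when the database is
unreachable at startup and retries this plugin on every write cycle (the
connection values are illustrative):

```toml
[[outputs.postgresql]]
  connection = "host=localhost user=telegraf dbname=metrics"
  ## Retry plugin startup on each cycle instead of exiting Telegraf.
  startup_error_behavior = "retry"
```
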
## Secret-store support

This plugin supports secrets from secret-stores for the `connection` option.
See the [secret-store documentation][SECRETSTORE] for more details on how
to use them.

[SECRETSTORE]: ../../../docs/CONFIGURATION.md#secret-store-secrets

## Configuration

```toml @sample.conf
# Publishes metrics to a postgresql database
[[outputs.postgresql]]
  ## Specify connection address via the standard libpq connection string:
  ##   host=... user=... password=... sslmode=... dbname=...
  ## Or a URL:
  ##   postgres://[user[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]
  ## See https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING
  ##
  ## All connection parameters are optional. Environment vars are also
  ## supported, e.g. PGPASSWORD, PGHOST, PGUSER, PGDATABASE.
  ## All supported vars can be found here:
  ##   https://www.postgresql.org/docs/current/libpq-envars.html
  ##
  ## Non-standard parameters:
  ##   pool_max_conns (default: 1) - Maximum size of connection pool for parallel (per-batch per-table) inserts.
  ##   pool_min_conns (default: 0) - Minimum size of connection pool.
  ##   pool_max_conn_lifetime (default: 0s) - Maximum connection age before closing.
  ##   pool_max_conn_idle_time (default: 0s) - Maximum idle time of a connection before closing.
  ##   pool_health_check_period (default: 0s) - Duration between health checks on idle connections.
  # connection = ""

  ## Postgres schema to use.
  # schema = "public"

  ## Store tags as foreign keys in the metrics table. Default is false.
  # tags_as_foreign_keys = false

  ## Suffix to append to table name (measurement name) for the foreign tag table.
  # tag_table_suffix = "_tag"

  ## Deny inserting metrics if the foreign tag can't be inserted.
  # foreign_tag_constraint = false

  ## Store all tags as a JSONB object in a single 'tags' column.
  # tags_as_jsonb = false

  ## Store all fields as a JSONB object in a single 'fields' column.
  # fields_as_jsonb = false

  ## Name of the timestamp column.
  ## NOTE: Some tools (e.g. Grafana) require the default name, so be careful!
  # timestamp_column_name = "time"

  ## Type of the timestamp column. Currently, "timestamp without time zone"
  ## and "timestamp with time zone" are supported.
  # timestamp_column_type = "timestamp without time zone"

  ## Templated statements to execute when creating a new table.
  # create_templates = [
  #   '''CREATE TABLE {{ .table }} ({{ .columns }})''',
  # ]

  ## Templated statements to execute when adding columns to a table.
  ## Set to an empty list to disable. Points containing tags for which there
  ## is no column will be skipped. Points containing fields for which there is
  ## no column will have the field omitted.
  # add_column_templates = [
  #   '''ALTER TABLE {{ .table }} ADD COLUMN IF NOT EXISTS {{ .columns|join ", ADD COLUMN IF NOT EXISTS " }}''',
  # ]

  ## Templated statements to execute when creating a new tag table.
  # tag_table_create_templates = [
  #   '''CREATE TABLE {{ .table }} ({{ .columns }}, PRIMARY KEY (tag_id))''',
  # ]

  ## Templated statements to execute when adding columns to a tag table.
  ## Set to an empty list to disable. Points containing tags for which there
  ## is no column will be skipped.
  # tag_table_add_column_templates = [
  #   '''ALTER TABLE {{ .table }} ADD COLUMN IF NOT EXISTS {{ .columns|join ", ADD COLUMN IF NOT EXISTS " }}''',
  # ]

  ## The postgres data type to use for storing unsigned 64-bit integer values
  ## (Postgres does not have a native unsigned 64-bit integer type).
  ## The value can be one of:
  ##   numeric - Uses the PostgreSQL "numeric" data type.
  ##   uint8   - Requires pguint extension (https://github.com/petere/pguint)
  # uint64_type = "numeric"

  ## When using pool_max_conns > 1, and a temporary error occurs, the query is
  ## retried with an incremental backoff. This controls the maximum duration.
  # retry_max_backoff = "15s"

  ## Approximate number of tag IDs to store in the in-memory cache (when using
  ## tags_as_foreign_keys). This is an optimization to skip inserting known
  ## tag IDs. Each entry consumes approximately 34 bytes of memory.
  # tag_cache_size = 100000

  ## Cut column names at the given length to not exceed PostgreSQL's
  ## 'identifier length' limit (default: no limit)
  ## (see https://www.postgresql.org/docs/current/limits.html)
  ## Be careful to not create duplicate column names!
  # column_name_length_limit = 0

  ## Enable & set the log level for the Postgres driver.
  # log_level = "warn" # trace, debug, info, warn, error, none
```

### Concurrency

By default the postgresql plugin does not utilize any concurrency. However, it
can be enabled for increased throughput. When concurrency is off, the telegraf
core handles things like retrying on failure, buffering, etc. When concurrency
is used, these aspects have to be handled by the plugin.

To enable concurrent writes to the database, set the `pool_max_conns`
connection parameter to a value greater than 1. When enabled, incoming batches
will be split by measurement/table name. In addition, if a batch comes in and
the previous batch has not completed, concurrency will be used for the new
batch as well.

If all connections are utilized and the pool is exhausted, further incoming
batches will be buffered within the telegraf core.
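
Concurrency is controlled purely through the connection string. A minimal
sketch with four pooled connections (the host, user, and database names are
illustrative):

```toml
[[outputs.postgresql]]
  ## pool_max_conns > 1 enables concurrent (per-batch, per-table) writes.
  connection = "host=localhost user=telegraf dbname=metrics pool_max_conns=4"
```
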
### Foreign tags

When using `tags_as_foreign_keys`, tags will be written to a separate table
with a `tag_id` column used for joins. Each series (unique combination of tag
values) gets its own entry in the tags table and a unique `tag_id`.

## Data types

By default the postgresql plugin maps Influx data types to the following
PostgreSQL types:

| Influx | PostgreSQL |
|--------|------------|
| [float](https://docs.influxdata.com/influxdb/latest/reference/syntax/line-protocol/#float) | [double precision](https://www.postgresql.org/docs/current/datatype-numeric.html#DATATYPE-FLOAT) |
| [integer](https://docs.influxdata.com/influxdb/latest/reference/syntax/line-protocol/#integer) | [bigint](https://www.postgresql.org/docs/current/datatype-numeric.html#DATATYPE-INT) |
| [uinteger](https://docs.influxdata.com/influxdb/latest/reference/syntax/line-protocol/#uinteger) | [numeric](https://www.postgresql.org/docs/current/datatype-numeric.html#DATATYPE-NUMERIC-DECIMAL)* |
| [string](https://docs.influxdata.com/influxdb/latest/reference/syntax/line-protocol/#string) | [text](https://www.postgresql.org/docs/current/datatype-character.html) |
| [boolean](https://docs.influxdata.com/influxdb/latest/reference/syntax/line-protocol/#boolean) | [boolean](https://www.postgresql.org/docs/current/datatype-boolean.html) |
| [unix timestamp](https://docs.influxdata.com/influxdb/latest/reference/syntax/line-protocol/#unix-timestamp) | [timestamp](https://www.postgresql.org/docs/current/datatype-datetime.html) |

It is important to note that `uinteger` (unsigned 64-bit integer) is mapped to
the `numeric` PostgreSQL data type. The `numeric` data type is an arbitrary
precision decimal data type that is less efficient than `bigint`. This is
necessary as the range of values for the Influx `uinteger` data type can
exceed `bigint`, and thus cause errors when inserting data (the maximum
`uint64` value, 18446744073709551615, is larger than the `bigint` maximum of
9223372036854775807).

### pguint

As a solution to the `uinteger`/`numeric` data type problem, there is a
PostgreSQL extension that offers unsigned 64-bit integer support:
[https://github.com/petere/pguint](https://github.com/petere/pguint).

If this extension is installed, you can set the `uint64_type` config option to
`uint8`, which will cause the plugin to use the `uint8` datatype instead of
`numeric`.
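
A minimal sketch, assuming the pguint extension is already installed in the
target database (e.g. using the Dockerfile shipped alongside this plugin):

```toml
[[outputs.postgresql]]
  connection = "host=localhost user=telegraf dbname=metrics"
  ## Requires the pguint extension on the server.
  uint64_type = "uint8"
```
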
## Templating

The postgresql plugin uses templates for the schema modification SQL
statements. This allows for complete control of the schema by the user.

Documentation on how to write templates can be found in the
[sqltemplate docs][1].

[1]: https://pkg.go.dev/github.com/influxdata/telegraf/plugins/outputs/postgresql/sqltemplate

## Long Column Names

Postgres imposes a limit on the length of column identifiers, which can be
found in the [official docs](https://www.postgresql.org/docs/current/limits.html).
By default Telegraf does not enforce this limit, as the limit can be modified
on the server side. Furthermore, cutting off column names could lead to
collisions if the column names only differ after the cut-off point.

> [!WARNING]
> Make sure you will not cause column name collisions when setting
> `column_name_length_limit`! If in doubt, explicitly shorten the field and
> tag names using e.g. the regex processor, as sketched below.
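
As a sketch, the regex processor can truncate long tag keys before they reach
this output. The `tag_rename` table and its `pattern`/`replacement` semantics
are assumptions about the processor's rename support, so verify them against
your Telegraf version:

```toml
[[processors.regex]]
  ## Hypothetical rename rule: truncate tag keys longer than 63 characters
  ## to their first 63 characters (63 is the default Postgres identifier limit).
  [[processors.regex.tag_rename]]
    pattern = "^(.{63}).+$"
    replacement = "${1}"
```
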
### Samples

#### TimescaleDB

```toml
tags_as_foreign_keys = true
create_templates = [
    '''CREATE TABLE {{ .table }} ({{ .columns }})''',
    '''SELECT create_hypertable({{ .table|quoteLiteral }}, 'time', chunk_time_interval => INTERVAL '7d')''',
    '''ALTER TABLE {{ .table }} SET (timescaledb.compress, timescaledb.compress_segmentby = 'tag_id')''',
]
```

##### Multi-node

```toml
tags_as_foreign_keys = true
create_templates = [
    '''CREATE TABLE {{ .table }} ({{ .columns }})''',
    '''SELECT create_distributed_hypertable({{ .table|quoteLiteral }}, 'time', partitioning_column => 'tag_id', number_partitions => (SELECT count(*) FROM timescaledb_information.data_nodes)::integer, replication_factor => 2, chunk_time_interval => INTERVAL '7d')''',
    '''ALTER TABLE {{ .table }} SET (timescaledb.compress, timescaledb.compress_segmentby = 'tag_id')''',
]
```

#### Tag table with view

This example enables `tags_as_foreign_keys`, but creates a postgres view to
automatically join the metric & tag tables. The metric & tag tables are stored
in a "telegraf" schema, with the view in the "public" schema.

```toml
tags_as_foreign_keys = true
schema = "telegraf"
create_templates = [
    '''CREATE TABLE {{ .table }} ({{ .columns }})''',
    '''CREATE VIEW {{ .table.WithSchema "public" }} AS SELECT time, {{ (.tagTable.Columns.Tags.Concat .allColumns.Fields).Identifiers | join "," }} FROM {{ .table }} t, {{ .tagTable }} tt WHERE t.tag_id = tt.tag_id''',
]
add_column_templates = [
    '''ALTER TABLE {{ .table }} ADD COLUMN IF NOT EXISTS {{ .columns|join ", ADD COLUMN IF NOT EXISTS " }}''',
    '''DROP VIEW IF EXISTS {{ .table.WithSchema "public" }}''',
    '''CREATE VIEW {{ .table.WithSchema "public" }} AS SELECT time, {{ (.tagTable.Columns.Tags.Concat .allColumns.Fields).Identifiers | join "," }} FROM {{ .table }} t, {{ .tagTable }} tt WHERE t.tag_id = tt.tag_id''',
]
tag_table_add_column_templates = [
    '''ALTER TABLE {{ .table }} ADD COLUMN IF NOT EXISTS {{ .columns|join ", ADD COLUMN IF NOT EXISTS " }}''',
    '''DROP VIEW IF EXISTS {{ .metricTable.WithSchema "public" }}''',
    '''CREATE VIEW {{ .metricTable.WithSchema "public" }} AS SELECT time, {{ (.allColumns.Tags.Concat .metricTable.Columns.Fields).Identifiers | join "," }} FROM {{ .metricTable }} t, {{ .tagTable }} tt WHERE t.tag_id = tt.tag_id''',
]
```

#### Immutable data table

Some PostgreSQL-compatible databases don't allow modification of table schema
after initial creation. This example works around the limitation by creating
a new table and then using a view to join them together.

```toml
tags_as_foreign_keys = true
schema = 'telegraf'
create_templates = [
    '''CREATE TABLE {{ .table }} ({{ .allColumns }})''',
    '''SELECT create_hypertable({{ .table|quoteLiteral }}, 'time', chunk_time_interval => INTERVAL '7d')''',
    '''ALTER TABLE {{ .table }} SET (timescaledb.compress, timescaledb.compress_segmentby = 'tag_id')''',
    '''SELECT add_compression_policy({{ .table|quoteLiteral }}, INTERVAL '14d')''',
    '''CREATE VIEW {{ .table.WithSuffix "_data" }} AS SELECT {{ .allColumns.Selectors | join "," }} FROM {{ .table }}''',
    '''CREATE VIEW {{ .table.WithSchema "public" }} AS SELECT time, {{ (.tagTable.Columns.Tags.Concat .allColumns.Fields).Identifiers | join "," }} FROM {{ .table.WithSuffix "_data" }} t, {{ .tagTable }} tt WHERE t.tag_id = tt.tag_id''',
]
add_column_templates = [
    '''ALTER TABLE {{ .table }} RENAME TO {{ (.table.WithSuffix "_" .table.Columns.Hash).WithSchema "" }}''',
    '''ALTER VIEW {{ .table.WithSuffix "_data" }} RENAME TO {{ (.table.WithSuffix "_" .table.Columns.Hash "_data").WithSchema "" }}''',
    '''DROP VIEW {{ .table.WithSchema "public" }}''',

    '''CREATE TABLE {{ .table }} ({{ .allColumns }})''',
    '''SELECT create_hypertable({{ .table|quoteLiteral }}, 'time', chunk_time_interval => INTERVAL '7d')''',
    '''ALTER TABLE {{ .table }} SET (timescaledb.compress, timescaledb.compress_segmentby = 'tag_id')''',
    '''SELECT add_compression_policy({{ .table|quoteLiteral }}, INTERVAL '14d')''',
    '''CREATE VIEW {{ .table.WithSuffix "_data" }} AS SELECT {{ .allColumns.Selectors | join "," }} FROM {{ .table }} UNION ALL SELECT {{ (.allColumns.Union .table.Columns).Selectors | join "," }} FROM {{ .table.WithSuffix "_" .table.Columns.Hash "_data" }}''',
    '''CREATE VIEW {{ .table.WithSchema "public" }} AS SELECT time, {{ (.tagTable.Columns.Tags.Concat .allColumns.Fields).Identifiers | join "," }} FROM {{ .table.WithSuffix "_data" }} t, {{ .tagTable }} tt WHERE t.tag_id = tt.tag_id''',
]
tag_table_add_column_templates = [
    '''ALTER TABLE {{ .table }} ADD COLUMN IF NOT EXISTS {{ .columns|join ", ADD COLUMN IF NOT EXISTS " }}''',
    '''DROP VIEW {{ .metricTable.WithSchema "public" }}''',
    '''CREATE VIEW {{ .metricTable.WithSchema "public" }} AS SELECT time, {{ (.allColumns.Tags.Concat .metricTable.Columns.Fields).Identifiers | join "," }} FROM {{ .metricTable.WithSuffix "_data" }} t, {{ .table }} tt WHERE t.tag_id = tt.tag_id''',
]
```

#### Index

Create an index on time and tag columns for faster querying of data.

```toml
create_templates = [
    '''CREATE TABLE {{ .table }} ({{ .columns }})''',
    '''CREATE INDEX ON {{ .table }} USING btree({{ .columns.Keys.Identifiers | join "," }})'''
]
```

## Error handling

When the plugin encounters an error while writing to the database, it attempts
to determine whether the error is temporary or permanent. An error is
considered temporary if it's possible that retrying the write will succeed.
Some examples of temporary errors are connection interruptions, deadlocks,
etc. Permanent errors are things like an invalid data type, insufficient
permissions, etc.

When an error is determined to be temporary, the plugin will retry the write
with an incremental backoff.

When an error is determined to be permanent, the plugin will discard the
sub-batch. The "sub-batch" is the portion of the input batch that is being
written to the same table.

plugins/outputs/postgresql/columns.go (new file, 10 lines)
@@ -0,0 +1,10 @@
package postgresql

import "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils"

// columnFromTag builds the column definition for storing the given tag,
// deriving the PostgreSQL data type from the tag's value.
func (p *Postgresql) columnFromTag(key string, value interface{}) utils.Column {
    return utils.Column{Name: key, Type: p.derivePgDatatype(value), Role: utils.TagColType}
}

// columnFromField builds the column definition for storing the given field,
// deriving the PostgreSQL data type from the field's value.
func (p *Postgresql) columnFromField(key string, value interface{}) utils.Column {
    return utils.Column{Name: key, Type: p.derivePgDatatype(value), Role: utils.FieldColType}
}

plugins/outputs/postgresql/datatype_uint8.go (new file, 342 lines)
@@ -0,0 +1,342 @@
package postgresql

// Copied from https://github.com/jackc/pgtype/blob/master/int8.go and tweaked for uint64
/*
Copyright (c) 2013-2021 Jack Christensen

MIT License

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

import (
    "database/sql"
    "database/sql/driver"
    "encoding/binary"
    "encoding/json"
    "errors"
    "fmt"
    "math"
    "strconv"

    "github.com/jackc/pgio"
    "github.com/jackc/pgtype"
)

var errUndefined = errors.New("cannot encode status undefined")
var errBadStatus = errors.New("invalid status")

// Uint8 is a pgtype value implementation for the pguint "uint8" data type,
// an unsigned 64-bit integer.
type Uint8 struct {
    Int    uint64
    Status pgtype.Status
}

// Set assigns src to u, converting from the supported Go source types.
func (u *Uint8) Set(src interface{}) error {
    if src == nil {
        *u = Uint8{Status: pgtype.Null}
        return nil
    }

    if value, ok := src.(interface{ Get() interface{} }); ok {
        value2 := value.Get()
        if value2 != value {
            return u.Set(value2)
        }
    }

    switch value := src.(type) {
    case int8:
        *u = Uint8{Int: uint64(value), Status: pgtype.Present}
    case uint8:
        *u = Uint8{Int: uint64(value), Status: pgtype.Present}
    case int16:
        *u = Uint8{Int: uint64(value), Status: pgtype.Present}
    case uint16:
        *u = Uint8{Int: uint64(value), Status: pgtype.Present}
    case int32:
        *u = Uint8{Int: uint64(value), Status: pgtype.Present}
    case uint32:
        *u = Uint8{Int: uint64(value), Status: pgtype.Present}
    case int64:
        *u = Uint8{Int: uint64(value), Status: pgtype.Present}
    case uint64:
        *u = Uint8{Int: value, Status: pgtype.Present}
    case int:
        if value < 0 {
            return fmt.Errorf("%d is less than minimum value for Uint8", value)
        }
        *u = Uint8{Int: uint64(value), Status: pgtype.Present}
    case uint:
        if uint64(value) > math.MaxInt64 {
            return fmt.Errorf("%d is greater than maximum value for Uint8", value)
        }
        *u = Uint8{Int: uint64(value), Status: pgtype.Present}
    case string:
        num, err := strconv.ParseUint(value, 10, 64)
        if err != nil {
            return err
        }
        *u = Uint8{Int: num, Status: pgtype.Present}
    case float32:
        if value > math.MaxInt64 {
            return fmt.Errorf("%f is greater than maximum value for Uint8", value)
        }
        *u = Uint8{Int: uint64(value), Status: pgtype.Present}
    case float64:
        if value > math.MaxInt64 {
            return fmt.Errorf("%f is greater than maximum value for Uint8", value)
        }
        *u = Uint8{Int: uint64(value), Status: pgtype.Present}
    case *int8:
        if value != nil {
            return u.Set(*value)
        }
        *u = Uint8{Status: pgtype.Null}
    case *uint8:
        if value != nil {
            return u.Set(*value)
        }
        *u = Uint8{Status: pgtype.Null}
    case *int16:
        if value != nil {
            return u.Set(*value)
        }
        *u = Uint8{Status: pgtype.Null}
    case *uint16:
        if value != nil {
            return u.Set(*value)
        }
        *u = Uint8{Status: pgtype.Null}
    case *int32:
        if value != nil {
            return u.Set(*value)
        }
        *u = Uint8{Status: pgtype.Null}
    case *uint32:
        if value != nil {
            return u.Set(*value)
        }
        *u = Uint8{Status: pgtype.Null}
    case *int64:
        if value != nil {
            return u.Set(*value)
        }
        *u = Uint8{Status: pgtype.Null}
    case *uint64:
        if value != nil {
            return u.Set(*value)
        }
        *u = Uint8{Status: pgtype.Null}
    case *int:
        if value != nil {
            return u.Set(*value)
        }
        *u = Uint8{Status: pgtype.Null}
    case *uint:
        if value != nil {
            return u.Set(*value)
        }
        *u = Uint8{Status: pgtype.Null}
    case *string:
        if value != nil {
            return u.Set(*value)
        }
        *u = Uint8{Status: pgtype.Null}
    case *float32:
        if value != nil {
            return u.Set(*value)
        }
        *u = Uint8{Status: pgtype.Null}
    case *float64:
        if value != nil {
            return u.Set(*value)
        }
        *u = Uint8{Status: pgtype.Null}
    default:
        return fmt.Errorf("cannot convert %v to Uint8", value)
    }

    return nil
}

// Get returns the underlying value, or nil when NULL.
func (u *Uint8) Get() interface{} {
    switch u.Status {
    case pgtype.Present:
        return u.Int
    case pgtype.Null:
        return nil
    default:
        return u.Status
    }
}

// AssignTo assigns the value of u into dst.
func (u *Uint8) AssignTo(dst interface{}) error {
    switch v := dst.(type) {
    case *int:
        *v = int(u.Int)
    case *int8:
        *v = int8(u.Int)
    case *int16:
        *v = int16(u.Int)
    case *int32:
        *v = int32(u.Int)
    case *int64:
        *v = int64(u.Int)
    case *uint:
        *v = uint(u.Int)
    case *uint8:
        *v = uint8(u.Int)
    case *uint16:
        *v = uint16(u.Int)
    case *uint32:
        *v = uint32(u.Int)
    case *uint64:
        *v = u.Int
    case *float32:
        *v = float32(u.Int)
    case *float64:
        *v = float64(u.Int)
    case *string:
        *v = strconv.FormatUint(u.Int, 10)
    case sql.Scanner:
        return v.Scan(u.Int)
    case interface{ Set(interface{}) error }:
        return v.Set(u.Int)
    default:
        return fmt.Errorf("cannot assign %v into %T", u.Int, dst)
    }
    return nil
}

// DecodeText decodes the PostgreSQL text wire format.
func (u *Uint8) DecodeText(_ *pgtype.ConnInfo, src []byte) error {
    if src == nil {
        *u = Uint8{Status: pgtype.Null}
        return nil
    }

    n, err := strconv.ParseUint(string(src), 10, 64)
    if err != nil {
        return err
    }

    *u = Uint8{Int: n, Status: pgtype.Present}
    return nil
}

// DecodeBinary decodes the PostgreSQL binary wire format.
func (u *Uint8) DecodeBinary(_ *pgtype.ConnInfo, src []byte) error {
    if src == nil {
        *u = Uint8{Status: pgtype.Null}
        return nil
    }

    if len(src) != 8 {
        return fmt.Errorf("invalid length for uint8: %v", len(src))
    }

    n := binary.BigEndian.Uint64(src)

    *u = Uint8{Int: n, Status: pgtype.Present}
    return nil
}

// EncodeText encodes u in the PostgreSQL text wire format.
func (u *Uint8) EncodeText(_ *pgtype.ConnInfo, buf []byte) ([]byte, error) {
    switch u.Status {
    case pgtype.Null:
        return nil, nil
    case pgtype.Undefined:
        return nil, errUndefined
    }

    return append(buf, strconv.FormatUint(u.Int, 10)...), nil
}

// EncodeBinary encodes u in the PostgreSQL binary wire format.
func (u *Uint8) EncodeBinary(_ *pgtype.ConnInfo, buf []byte) ([]byte, error) {
    switch u.Status {
    case pgtype.Null:
        return nil, nil
    case pgtype.Undefined:
        return nil, errUndefined
    }

    return pgio.AppendUint64(buf, u.Int), nil
}

// Scan implements the database/sql Scanner interface.
func (u *Uint8) Scan(src interface{}) error {
    if src == nil {
        *u = Uint8{Status: pgtype.Null}
        return nil
    }

    switch src := src.(type) {
    case uint64:
        *u = Uint8{Int: src, Status: pgtype.Present}
        return nil
    case string:
        return u.DecodeText(nil, []byte(src))
    case []byte:
        srcCopy := make([]byte, len(src))
        copy(srcCopy, src)
        return u.DecodeText(nil, srcCopy)
    }

    return fmt.Errorf("cannot scan %T", src)
}

// Value implements the database/sql/driver Valuer interface.
func (u *Uint8) Value() (driver.Value, error) {
    switch u.Status {
    case pgtype.Present:
        return int64(u.Int), nil
    case pgtype.Null:
        return nil, nil
    default:
        return nil, errUndefined
    }
}

// MarshalJSON implements the encoding/json Marshaler interface.
func (u *Uint8) MarshalJSON() ([]byte, error) {
    switch u.Status {
    case pgtype.Present:
        return []byte(strconv.FormatUint(u.Int, 10)), nil
    case pgtype.Null:
        return []byte("null"), nil
    case pgtype.Undefined:
        return nil, errUndefined
    }

    return nil, errBadStatus
}

// UnmarshalJSON implements the encoding/json Unmarshaler interface.
func (u *Uint8) UnmarshalJSON(b []byte) error {
    var n *uint64
    err := json.Unmarshal(b, &n)
    if err != nil {
        return err
    }

    if n == nil {
        *u = Uint8{Status: pgtype.Null}
    } else {
        *u = Uint8{Int: *n, Status: pgtype.Present}
    }

    return nil
}

plugins/outputs/postgresql/datatypes.go (new file, 60 lines)
@@ -0,0 +1,60 @@
package postgresql

import (
    "time"
)

// Constants for naming PostgreSQL data types both in
// their short and long versions.
const (
    PgBool                     = "boolean"
    PgSmallInt                 = "smallint"
    PgInteger                  = "integer"
    PgBigInt                   = "bigint"
    PgReal                     = "real"
    PgDoublePrecision          = "double precision"
    PgNumeric                  = "numeric"
    PgText                     = "text"
    PgTimestampWithTimeZone    = "timestamp with time zone"
    PgTimestampWithoutTimeZone = "timestamp without time zone"
    PgSerial                   = "serial"
    PgJSONb                    = "jsonb"
)

// Types from pguint
const (
    PgUint8 = "uint8"
)

// derivePgDatatype returns the appropriate PostgreSQL data type
// that could hold the value.
func (p *Postgresql) derivePgDatatype(value interface{}) string {
    if p.Uint64Type == PgUint8 {
        if _, ok := value.(uint64); ok {
            return PgUint8
        }
    }

    switch value.(type) {
    case bool:
        return PgBool
    case uint64:
        return PgNumeric
    case int64, int, uint, uint32:
        return PgBigInt
    case int32:
        return PgInteger
    case int16, int8:
        return PgSmallInt
    case float64:
        return PgDoublePrecision
    case float32:
        return PgReal
    case string:
        return PgText
    case time.Time:
        return PgTimestampWithoutTimeZone
    default:
        return PgText
    }
}

plugins/outputs/postgresql/postgresql.go (new file, 507 lines)
@@ -0,0 +1,507 @@
//go:generate ../../../tools/readme_config_includer/generator
package postgresql

import (
    "context"
    _ "embed"
    "errors"
    "fmt"
    "strings"
    "time"

    "github.com/coocood/freecache"
    "github.com/jackc/pgconn"
    "github.com/jackc/pgtype"
    "github.com/jackc/pgx/v4"
    "github.com/jackc/pgx/v4/pgxpool"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/config"
    "github.com/influxdata/telegraf/internal"
    "github.com/influxdata/telegraf/logger"
    "github.com/influxdata/telegraf/plugins/outputs"
    "github.com/influxdata/telegraf/plugins/outputs/postgresql/sqltemplate"
    "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils"
)

// dbh is the database handle interface satisfied by both the connection pool and a transaction.
type dbh interface {
    Begin(ctx context.Context) (pgx.Tx, error)
    CopyFrom(ctx context.Context, tableName pgx.Identifier, columnNames []string, rowSrc pgx.CopyFromSource) (int64, error)
    Exec(ctx context.Context, sql string, arguments ...interface{}) (commandTag pgconn.CommandTag, err error)
    Query(ctx context.Context, sql string, args ...interface{}) (pgx.Rows, error)
}

//go:embed sample.conf
var sampleConfig string

type Postgresql struct {
    Connection                 config.Secret           `toml:"connection"`
    Schema                     string                  `toml:"schema"`
    TagsAsForeignKeys          bool                    `toml:"tags_as_foreign_keys"`
    TagTableSuffix             string                  `toml:"tag_table_suffix"`
    ForeignTagConstraint       bool                    `toml:"foreign_tag_constraint"`
    TagsAsJsonb                bool                    `toml:"tags_as_jsonb"`
    FieldsAsJsonb              bool                    `toml:"fields_as_jsonb"`
    TimestampColumnName        string                  `toml:"timestamp_column_name"`
    TimestampColumnType        string                  `toml:"timestamp_column_type"`
    CreateTemplates            []*sqltemplate.Template `toml:"create_templates"`
    AddColumnTemplates         []*sqltemplate.Template `toml:"add_column_templates"`
    TagTableCreateTemplates    []*sqltemplate.Template `toml:"tag_table_create_templates"`
    TagTableAddColumnTemplates []*sqltemplate.Template `toml:"tag_table_add_column_templates"`
    Uint64Type                 string                  `toml:"uint64_type"`
    RetryMaxBackoff            config.Duration         `toml:"retry_max_backoff"`
    TagCacheSize               int                     `toml:"tag_cache_size"`
    ColumnNameLenLimit         int                     `toml:"column_name_length_limit"`
    LogLevel                   string                  `toml:"log_level"`
    Logger                     telegraf.Logger         `toml:"-"`

    dbContext       context.Context
    dbContextCancel func()
    dbConfig        *pgxpool.Config
    db              *pgxpool.Pool
    tableManager    *TableManager
    tagsCache       *freecache.Cache

    pguint8 *pgtype.DataType

    writeChan      chan *TableSource
    writeWaitGroup *utils.WaitGroup

    // Column types
    timeColumn       utils.Column
    tagIDColumn      utils.Column
    fieldsJSONColumn utils.Column
    tagsJSONColumn   utils.Column
}

func (*Postgresql) SampleConfig() string {
    return sampleConfig
}

// Init validates the configuration and prepares the connection pool config.
func (p *Postgresql) Init() error {
    if p.TagCacheSize < 0 {
        return errors.New("invalid tag_cache_size")
    }

    // Set the time-column name
    if p.TimestampColumnName == "" {
        p.TimestampColumnName = "time"
    }

    switch p.TimestampColumnType {
    case "":
        p.TimestampColumnType = PgTimestampWithoutTimeZone
    case PgTimestampWithoutTimeZone, PgTimestampWithTimeZone:
        // do nothing for the valid choices
    default:
        return fmt.Errorf("unknown timestamp column type %q", p.TimestampColumnType)
    }

    // Initialize the column prototypes
    p.timeColumn = utils.Column{
        Name: p.TimestampColumnName,
        Type: p.TimestampColumnType,
        Role: utils.TimeColType,
    }
    p.tagIDColumn = utils.Column{Name: "tag_id", Type: PgBigInt, Role: utils.TagsIDColType}
    p.fieldsJSONColumn = utils.Column{Name: "fields", Type: PgJSONb, Role: utils.FieldColType}
    p.tagsJSONColumn = utils.Column{Name: "tags", Type: PgJSONb, Role: utils.TagColType}

    connectionSecret, err := p.Connection.Get()
    if err != nil {
        return fmt.Errorf("getting address failed: %w", err)
    }
    connection := connectionSecret.String()
    defer connectionSecret.Destroy()

    if p.dbConfig, err = pgxpool.ParseConfig(connection); err != nil {
        return err
    }
    parsedConfig, err := pgx.ParseConfig(connection)
    if err != nil {
        return err
    }
    if _, ok := parsedConfig.Config.RuntimeParams["pool_max_conns"]; !ok {
        // The pgx default for pool_max_conns is 4. However we want to default to 1.
        p.dbConfig.MaxConns = 1
    }

    if _, ok := p.dbConfig.ConnConfig.RuntimeParams["application_name"]; !ok {
        p.dbConfig.ConnConfig.RuntimeParams["application_name"] = "telegraf"
    }

    if p.LogLevel != "" {
        p.dbConfig.ConnConfig.Logger = utils.PGXLogger{Logger: p.Logger}
        p.dbConfig.ConnConfig.LogLevel, err = pgx.LogLevelFromString(p.LogLevel)
        if err != nil {
            return errors.New("invalid log level")
        }
    }

    switch p.Uint64Type {
    case PgNumeric:
    case PgUint8:
        p.dbConfig.AfterConnect = p.registerUint8
    default:
        return errors.New("invalid uint64_type")
    }

    return nil
}

// Connect establishes a connection to the target database and prepares the cache
func (p *Postgresql) Connect() error {
    // Yes, we're not supposed to store the context. However since we don't receive a context, we have to.
    p.dbContext, p.dbContextCancel = context.WithCancel(context.Background())
    var err error
    p.db, err = pgxpool.ConnectConfig(p.dbContext, p.dbConfig)
    if err != nil {
        p.dbContextCancel()
        return &internal.StartupError{
            Err:   err,
            Retry: true,
        }
    }
    p.tableManager = NewTableManager(p)

    if p.TagsAsForeignKeys {
        p.tagsCache = freecache.NewCache(p.TagCacheSize * 34) // from testing, each entry consumes approx 34 bytes
    }

    maxConns := int(p.db.Stat().MaxConns())
    if maxConns > 1 {
        p.writeChan = make(chan *TableSource)
        p.writeWaitGroup = utils.NewWaitGroup()
        for i := 0; i < maxConns; i++ {
            p.writeWaitGroup.Add(1)
            go p.writeWorker(p.dbContext)
        }
    }

    return nil
}

// registerUint8 registers the pguint "uint8" data type on a newly established
// connection so that uint64 values can be encoded and decoded.
func (p *Postgresql) registerUint8(_ context.Context, conn *pgx.Conn) error {
    if p.pguint8 == nil {
        dt := pgtype.DataType{
            // Use 'numeric' type for encoding/decoding across the wire
            // It might be more efficient to create a native pgtype.Type, but would involve a lot of code. So this is
            // probably good enough.
            Value: &Uint8{},
            Name:  "uint8",
        }
        row := conn.QueryRow(p.dbContext, "SELECT oid FROM pg_type WHERE typname=$1", dt.Name)
        if err := row.Scan(&dt.OID); err != nil {
            return fmt.Errorf("retrieving OID for uint8 data type: %w", err)
        }
        p.pguint8 = &dt
    }

    conn.ConnInfo().RegisterDataType(*p.pguint8)
    return nil
}

// Close closes the connection(s) to the database.
func (p *Postgresql) Close() error {
    if p.writeChan != nil {
        // We're using async mode. Gracefully close with timeout.
        close(p.writeChan)
        select {
        case <-p.writeWaitGroup.C():
        case <-time.NewTimer(time.Second * 5).C:
            p.Logger.Warnf("Shutdown timeout expired while waiting for metrics to flush. Some metrics may not be written to database.")
        }
    }

    // Die!
    p.dbContextCancel()
    if p.db != nil {
        p.db.Close()
    }
    p.tableManager = nil
    return nil
}

// Write writes the metrics to the database, dispatching either sequentially
// or concurrently depending on the configured pool size.
func (p *Postgresql) Write(metrics []telegraf.Metric) error {
    if p.tagsCache != nil {
        // gather at the start of write so there's less chance of any async operations ongoing
        p.Logger.Debugf("cache: size=%d hit=%d miss=%d full=%d\n",
            p.tagsCache.EntryCount(),
            p.tagsCache.HitCount(),
            p.tagsCache.MissCount(),
            p.tagsCache.EvacuateCount(),
        )
        p.tagsCache.ResetStatistics()
    }

    tableSources := NewTableSources(p, metrics)

    var err error
    if p.db.Stat().MaxConns() > 1 {
        p.writeConcurrent(tableSources)
    } else {
        err = p.writeSequential(tableSources)
    }
    if err != nil {
        var pgErr *pgconn.PgError
        if errors.As(err, &pgErr) {
            // PgError doesn't include .Detail in Error(), so we concat it onto .Message.
            if pgErr.Detail != "" {
                pgErr.Message += "; " + pgErr.Detail
            }
        }
    }

    return err
}

// writeSequential writes all sub-batches within a single transaction, using
// savepoints so that a permanent error drops only the offending sub-batch.
func (p *Postgresql) writeSequential(tableSources map[string]*TableSource) error {
    tx, err := p.db.Begin(p.dbContext)
    if err != nil {
        return fmt.Errorf("starting transaction: %w", err)
    }
    defer tx.Rollback(p.dbContext) //nolint:errcheck // In case of failure during commit, "err" from commit will be returned

    for _, tableSource := range tableSources {
        sp := tx
        if len(tableSources) > 1 {
            // wrap each sub-batch in a savepoint so that if a permanent error is received, we can drop just that
            // one sub-batch, and insert everything else.
            sp, err = tx.Begin(p.dbContext)
            if err != nil {
                return fmt.Errorf("starting savepoint: %w", err)
            }
        }

        err := p.writeMetricsFromMeasure(p.dbContext, sp, tableSource)
        if err != nil {
            if isTempError(err) {
                // return so that telegraf will retry the whole batch
                return err
            }
            p.Logger.Errorf("write error (permanent, dropping sub-batch): %v", err)
            if len(tableSources) == 1 {
                return nil
            }
            // drop this one sub-batch and continue trying the rest
            if err := sp.Rollback(p.dbContext); err != nil {
                return err
            }
        }
        // savepoints do not need to be committed (released), so save the round trip and skip it
    }

    if err := tx.Commit(p.dbContext); err != nil {
        return fmt.Errorf("committing transaction: %w", err)
    }
    return nil
}

// writeConcurrent hands each sub-batch to the worker pool.
func (p *Postgresql) writeConcurrent(tableSources map[string]*TableSource) {
    for _, tableSource := range tableSources {
        select {
        case p.writeChan <- tableSource:
        case <-p.dbContext.Done():
            return
        }
    }
}

// writeWorker consumes sub-batches from writeChan until the channel is closed
// or the database context is cancelled.
func (p *Postgresql) writeWorker(ctx context.Context) {
    defer p.writeWaitGroup.Done()
    for {
        select {
        case tableSource, ok := <-p.writeChan:
            if !ok {
                return
            }
            if err := p.writeRetry(ctx, tableSource); err != nil {
                p.Logger.Errorf("write error (permanent, dropping sub-batch): %v", err)
            }
        case <-p.dbContext.Done():
            return
        }
    }
}

// isTempError reports whether the error received during a metric write operation is temporary or permanent.
// A temporary error is one where retrying the write at a later time might succeed.
// Note however that this applies to the transaction as a whole, not the individual operation. For example, a write
// might come in that needs a new table created, but another worker created the table between when we checked for it
// and when we tried to create it. The operation error is permanent, as retrying `CREATE TABLE` will still fail. But
// if we retry the transaction from scratch, the table check will see that it exists, so we consider the error
// temporary.
func isTempError(err error) bool {
    var pgErr *pgconn.PgError
    if errors.As(err, &pgErr); pgErr != nil {
        // https://www.postgresql.org/docs/12/errcodes-appendix.html
        errClass := pgErr.Code[:2]
        switch errClass {
        case "23": // Integrity Constraint Violation
            // 23505 - unique_violation
            if pgErr.Code == "23505" && strings.Contains(err.Error(), "pg_type_typname_nsp_index") {
                // Happens when you try to create 2 tables simultaneously.
                return true
            }
        case "25": // Invalid Transaction State
            // If we're here, this is a bug, but recoverable
            return true
        case "40": // Transaction Rollback
            if pgErr.Code == "40P01" { // deadlock_detected
                return true
            }
        case "42": // Syntax Error or Access Rule Violation
            switch pgErr.Code {
            case "42701": // duplicate_column
                return true
            case "42P07": // duplicate_table
                return true
            }
        case "53": // Insufficient Resources
            return true
        case "57": // Operator Intervention
            switch pgErr.Code {
            case "57014": // query_cancelled
                // This one is a bit of a mess. This code comes back when PGX cancels the query. Such as when PGX
                // can't convert to the column's type. So even though the error was originally generated by PGX, we
                // get the error from Postgres.
                return false
            case "57P04": // database_dropped
                return false
            }
            return true
        }
        // Assume that any other error that comes from postgres is a permanent error
        return false
    }

    var tempErr interface{ Temporary() bool }
    if errors.As(err, &tempErr) {
        return tempErr.Temporary()
    }

    // Assume that any other error is permanent.
    // This may mean that we incorrectly discard data that could have been retried, but the alternative is that we
    // get stuck retrying data that will never succeed, causing good data to be dropped because the buffer fills up.
    return false
}

// writeRetry writes the sub-batch, retrying temporary errors with an
// incremental backoff capped at retry_max_backoff.
func (p *Postgresql) writeRetry(ctx context.Context, tableSource *TableSource) error {
    backoff := time.Duration(0)
    for {
        err := p.writeMetricsFromMeasure(ctx, p.db, tableSource)
        if err == nil {
            return nil
        }

        if !isTempError(err) {
            return err
        }
        p.Logger.Errorf("write error (retry in %s): %v", backoff, err)
        tableSource.Reset()
        time.Sleep(backoff)

        if backoff == 0 {
            backoff = time.Millisecond * 250
        } else {
            backoff *= 2
            if backoff > time.Duration(p.RetryMaxBackoff) {
                backoff = time.Duration(p.RetryMaxBackoff)
            }
        }
    }
}

// writeMetricsFromMeasure writes the metrics from a specified measure. All the provided metrics must belong to the
// same measurement.
func (p *Postgresql) writeMetricsFromMeasure(ctx context.Context, db dbh, tableSource *TableSource) error {
    err := p.tableManager.MatchSource(ctx, db, tableSource)
    if err != nil {
        return err
    }

    if p.TagsAsForeignKeys {
        if err = writeTagTable(ctx, db, tableSource); err != nil {
            if p.ForeignTagConstraint {
                return fmt.Errorf("writing to tag table %q: %w", tableSource.Name()+p.TagTableSuffix, err)
            }
            // log and continue. As the admin can correct the issue, and tags don't change over time, they can be
            // added from future metrics after issue is corrected.
            p.Logger.Errorf("writing to tag table %q: %s", tableSource.Name()+p.TagTableSuffix, err.Error())
        }
    }

    fullTableName := utils.FullTableName(p.Schema, tableSource.Name())
    if _, err := db.CopyFrom(ctx, fullTableName, tableSource.ColumnNames(), tableSource); err != nil {
        return err
    }

    return nil
}

// writeTagTable upserts the sub-batch's tag sets into the tag table through a
// temporary staging table.
func writeTagTable(ctx context.Context, db dbh, tableSource *TableSource) error {
    ttsrc := NewTagTableSource(tableSource)

    // Check whether we have any tags to insert
    if !ttsrc.Next() {
        return nil
    }
    ttsrc.Reset()

    // need a transaction so that if it errors, we don't roll back the parent transaction, just the tags
    tx, err := db.Begin(ctx)
    if err != nil {
        return err
    }
    defer tx.Rollback(ctx) //nolint:errcheck // In case of failure during commit, "err" from commit will be returned

    ident := pgx.Identifier{ttsrc.postgresql.Schema, ttsrc.Name()}
    identTemp := pgx.Identifier{ttsrc.Name() + "_temp"}
    sql := fmt.Sprintf("CREATE TEMP TABLE %s (LIKE %s) ON COMMIT DROP", identTemp.Sanitize(), ident.Sanitize())
    if _, err := tx.Exec(ctx, sql); err != nil {
        return fmt.Errorf("creating tags temp table: %w", err)
    }

    if _, err := tx.CopyFrom(ctx, identTemp, ttsrc.ColumnNames(), ttsrc); err != nil {
        return fmt.Errorf("copying into tags temp table: %w", err)
    }

    insert := fmt.Sprintf("INSERT INTO %s SELECT * FROM %s ORDER BY tag_id ON CONFLICT (tag_id) DO NOTHING",
        ident.Sanitize(), identTemp.Sanitize())
    if _, err := tx.Exec(ctx, insert); err != nil {
        return fmt.Errorf("inserting into tags table: %w", err)
    }

    if err := tx.Commit(ctx); err != nil {
        return err
    }

    ttsrc.UpdateCache()
    return nil
}

// newPostgresql creates a Postgresql with the default configuration.
func newPostgresql() *Postgresql {
    p := &Postgresql{
        Schema:                     "public",
        TagTableSuffix:             "_tag",
        TagCacheSize:               100000,
        Uint64Type:                 PgNumeric,
        CreateTemplates:            []*sqltemplate.Template{{}},
        AddColumnTemplates:         []*sqltemplate.Template{{}},
        TagTableCreateTemplates:    []*sqltemplate.Template{{}},
        TagTableAddColumnTemplates: []*sqltemplate.Template{{}},
        RetryMaxBackoff:            config.Duration(time.Second * 15),
        Logger:                     logger.New("outputs", "postgresql", ""),
        LogLevel:                   "warn",
    }

    // The default templates are static and known to parse, so the errors are ignored.
    p.CreateTemplates[0].UnmarshalText([]byte(`CREATE TABLE {{ .table }} ({{ .columns }})`))
    p.AddColumnTemplates[0].UnmarshalText([]byte(`ALTER TABLE {{ .table }} ADD COLUMN IF NOT EXISTS {{ .columns|join ", ADD COLUMN IF NOT EXISTS " }}`))
    p.TagTableCreateTemplates[0].UnmarshalText([]byte(`CREATE TABLE {{ .table }} ({{ .columns }}, PRIMARY KEY (tag_id))`))
    p.TagTableAddColumnTemplates[0].UnmarshalText(
        []byte(`ALTER TABLE {{ .table }} ADD COLUMN IF NOT EXISTS {{ .columns|join ", ADD COLUMN IF NOT EXISTS " }}`),
    )

    return p
}

func init() {
    outputs.Add("postgresql", func() telegraf.Output { return newPostgresql() })
}

plugins/outputs/postgresql/postgresql_bench_test.go (new file, 112 lines)
@@ -0,0 +1,112 @@
package postgresql

import (
    "context"
    "fmt"
    "math/rand"
    "strconv"
    "testing"
    "time"

    "github.com/stretchr/testify/require"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/config"
    "github.com/influxdata/telegraf/metric"
)

// ctx, MSS, and newPostgresqlTest are defined in postgresql_test.go (suppressed above).

func BenchmarkPostgresql_sequential(b *testing.B) {
    gen := batchGenerator(batchGeneratorArgs{ctx, b, 1000, 3, 8, 12, 100, 2})
    benchmarkPostgresql(b, gen, 1, true)
}

func BenchmarkPostgresql_concurrent(b *testing.B) {
    gen := batchGenerator(batchGeneratorArgs{ctx, b, 1000, 3, 8, 12, 100, 2})
    benchmarkPostgresql(b, gen, 10, true)
}

func benchmarkPostgresql(b *testing.B, gen <-chan []telegraf.Metric, concurrency int, foreignTags bool) {
    p, err := newPostgresqlTest(b)
    require.NoError(b, err)

    connection, err := p.Connection.Get()
    require.NoError(b, err)
    p.Connection = config.NewSecret([]byte(connection.String() + fmt.Sprintf(" pool_max_conns=%d", concurrency)))
    connection.Destroy()

    p.TagsAsForeignKeys = foreignTags
    p.LogLevel = ""
    require.NoError(b, p.Init())
    if err := p.Connect(); err != nil {
        b.Fatalf("Error: %s", err)
    }

    metricCount := 0

    b.ResetTimer()
    tStart := time.Now()
    for i := 0; i < b.N; i++ {
        batch := <-gen
        if err := p.Write(batch); err != nil {
            b.Fatalf("Error: %s", err)
        }
        metricCount += len(batch)
    }
    _ = p.Close()
    b.StopTimer()
    tStop := time.Now()
    b.ReportMetric(float64(metricCount)/tStop.Sub(tStart).Seconds(), "metrics/s")
}

type batchGeneratorArgs struct {
    ctx              context.Context
    b                *testing.B
    batchSize        int
    numTables        int
    numTags          int
    numFields        int
    tagCardinality   int
    fieldCardinality int
}

// tagCardinality counts all the tag keys & values as one element. fieldCardinality counts all the field keys (not values) as one element.
func batchGenerator(args batchGeneratorArgs) <-chan []telegraf.Metric {
    tagSets := make([]MSS, 0, args.tagCardinality)
    for i := 0; i < args.tagCardinality; i++ {
        tags := MSS{}
        for j := 0; j < args.numTags; j++ {
            tags[fmt.Sprintf("tag_%d", j)] = strconv.Itoa(rand.Int())
        }
        tagSets = append(tagSets, tags)
    }

    metricChan := make(chan []telegraf.Metric, 32)
    go func() {
        for {
            batch := make([]telegraf.Metric, 0, args.batchSize)
            for i := 0; i < args.batchSize; i++ {
                tableName := args.b.Name() + "_" + strconv.Itoa(rand.Intn(args.numTables))

                tags := tagSets[rand.Intn(len(tagSets))]

                m := metric.New(tableName, tags, nil, time.Now())
                m.AddTag("tableName", tableName) // ensure the tag set is unique to this table. Just in case...

                // We do field cardinality by randomizing the name of the final field to an integer < cardinality.
                for j := 0; j < args.numFields-1; j++ { // use -1 to reserve the last field for cardinality
                    m.AddField("f"+strconv.Itoa(j), rand.Int())
                }
                m.AddField("f"+strconv.Itoa(rand.Intn(args.fieldCardinality)), rand.Int())

                batch = append(batch, m)
            }

            select {
            case metricChan <- batch:
            case <-ctx.Done():
                return
            }
        }
    }()

    return metricChan
}

plugins/outputs/postgresql/postgresql_test.go (new file, 1214 lines)
File diff suppressed because it is too large
plugins/outputs/postgresql/sample.conf (new file, 97 lines)
@@ -0,0 +1,97 @@
|
|||
# Publishes metrics to a postgresql database
[[outputs.postgresql]]
  ## Specify connection address via the standard libpq connection string:
  ##   host=... user=... password=... sslmode=... dbname=...
  ## Or a URL:
  ##   postgres://[user[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]
  ## See https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING
  ##
  ## All connection parameters are optional. Environment vars are also
  ## supported, e.g. PGPASSWORD, PGHOST, PGUSER, PGDATABASE.
  ## All supported vars can be found here:
  ##   https://www.postgresql.org/docs/current/libpq-envars.html
  ##
  ## Non-standard parameters:
  ##   pool_max_conns (default: 1) - Maximum size of connection pool for parallel (per-batch per-table) inserts.
  ##   pool_min_conns (default: 0) - Minimum size of connection pool.
  ##   pool_max_conn_lifetime (default: 0s) - Maximum connection age before closing.
  ##   pool_max_conn_idle_time (default: 0s) - Maximum idle time of a connection before closing.
  ##   pool_health_check_period (default: 0s) - Duration between health checks on idle connections.
  # connection = ""

  ## Postgres schema to use.
  # schema = "public"

  ## Store tags as foreign keys in the metrics table. Default is false.
  # tags_as_foreign_keys = false

  ## Suffix to append to the table name (measurement name) for the foreign tag table.
  # tag_table_suffix = "_tag"

  ## Deny inserting metrics if the foreign tag can't be inserted.
  # foreign_tag_constraint = false

  ## Store all tags as a JSONB object in a single 'tags' column.
  # tags_as_jsonb = false

  ## Store all fields as a JSONB object in a single 'fields' column.
  # fields_as_jsonb = false

  ## Name of the timestamp column.
  ## NOTE: Some tools (e.g. Grafana) require the default name, so be careful!
  # timestamp_column_name = "time"

  ## Type of the timestamp column. Currently, "timestamp without time zone"
  ## and "timestamp with time zone" are supported.
  # timestamp_column_type = "timestamp without time zone"

  ## Templated statements to execute when creating a new table.
  # create_templates = [
  #   '''CREATE TABLE {{ .table }} ({{ .columns }})''',
  # ]

  ## Templated statements to execute when adding columns to a table.
  ## Set to an empty list to disable. Points containing tags for which there is
  ## no column will be skipped. Points containing fields for which there is no
  ## column will have the field omitted.
  # add_column_templates = [
  #   '''ALTER TABLE {{ .table }} ADD COLUMN IF NOT EXISTS {{ .columns|join ", ADD COLUMN IF NOT EXISTS " }}''',
  # ]

  ## Templated statements to execute when creating a new tag table.
  # tag_table_create_templates = [
  #   '''CREATE TABLE {{ .table }} ({{ .columns }}, PRIMARY KEY (tag_id))''',
  # ]

  ## Templated statements to execute when adding columns to a tag table.
  ## Set to an empty list to disable. Points containing tags for which there is
  ## no column will be skipped.
  # tag_table_add_column_templates = [
  #   '''ALTER TABLE {{ .table }} ADD COLUMN IF NOT EXISTS {{ .columns|join ", ADD COLUMN IF NOT EXISTS " }}''',
  # ]

  ## The postgres data type to use for storing unsigned 64-bit integer values
  ## (Postgres does not have a native unsigned 64-bit integer type).
  ## The value can be one of:
  ##   numeric - Uses the PostgreSQL "numeric" data type.
  ##   uint8 - Requires the pguint extension (https://github.com/petere/pguint)
  # uint64_type = "numeric"

  ## When using pool_max_conns > 1, and a temporary error occurs, the query is
  ## retried with an incremental backoff. This controls the maximum duration.
  # retry_max_backoff = "15s"

  ## Approximate number of tag IDs to store in the in-memory cache (when using
  ## tags_as_foreign_keys). This is an optimization to skip inserting known
  ## tag IDs. Each entry consumes approximately 34 bytes of memory.
  # tag_cache_size = 100000

  ## Cut column names at the given length so they do not exceed PostgreSQL's
  ## 'identifier length' limit (default: no limit)
  ## (see https://www.postgresql.org/docs/current/limits.html).
  ## Be careful not to create duplicate column names!
  # column_name_length_limit = 0

  ## Enable & set the log level for the Postgres driver.
  # log_level = "warn" # trace, debug, info, warn, error, none
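  ## For illustration (an assumption, not part of the shipped sample config):
  ## with the default templates above, a metric named "cpu" with a tag "host"
  ## and an integer field "usage" produces SQL roughly like:
  ##   CREATE TABLE "public"."cpu" ("time" timestamp without time zone, "host" text, "usage" bigint)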
405
plugins/outputs/postgresql/sqltemplate/template.go
Normal file
@ -0,0 +1,405 @@
// Package sqltemplate
/*
Templates are used for creation of the SQL used when creating and modifying tables. These templates are specified within
the configuration as the parameters 'create_templates', 'add_column_templates', 'tag_table_create_templates', and
'tag_table_add_column_templates'.

The templating functionality behaves the same in all cases. However, the variables will differ.

# Variables

The following variables are available within all template executions:

  - table - A Table object referring to the current table being
    created/modified.

  - columns - A Columns object of the new columns being added to the
    table (all columns in the case of a new table, and new columns in the case
    of an existing table).

  - allColumns - A Columns object of all the columns (both old and new)
    of the table. In the case of a new table, this is the same as `columns`.

  - metricTable - A Table object referring to the table containing the
    fields. When TagsAsForeignKeys is in use and `table` is the tag table,
    `metricTable` is the table using this one for its tags.

  - tagTable - A Table object referring to the table containing the
    tags. When TagsAsForeignKeys is in use and `table` is the metrics table,
    `tagTable` is the table containing the tags for it.

Each object has helper methods that may be used within the template. See the documentation for the appropriate type.

When the object is interpolated without a helper, it is automatically converted to a string through its String() method.

# Functions

All the functions provided by the Sprig library (http://masterminds.github.io/sprig/) are available within template executions.

In addition, the following functions are also available:

  - quoteIdentifier - Quotes the input string as a Postgres identifier.

  - quoteLiteral - Quotes the input string as a Postgres literal.

# Examples

The default templates show basic usage. When left unconfigured, it is the equivalent of:

	[outputs.postgresql]
	  create_templates = [
	    '''CREATE TABLE {{.table}} ({{.columns}})''',
	  ]
	  add_column_templates = [
	    '''ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}''',
	  ]
	  tag_table_create_templates = [
	    '''CREATE TABLE {{.table}} ({{.columns}}, PRIMARY KEY (tag_id))'''
	  ]
	  tag_table_add_column_templates = [
	    '''ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}''',
	  ]

A simple example for usage with TimescaleDB would be:

	[outputs.postgresql]
	  create_templates = [
	    '''CREATE TABLE {{ .table }} ({{ .allColumns }})''',
	    '''SELECT create_hypertable({{ .table|quoteLiteral }}, 'time', chunk_time_interval => INTERVAL '1d')''',
	    '''ALTER TABLE {{ .table }} SET (timescaledb.compress, timescaledb.compress_segmentby = 'tag_id')''',
	    '''SELECT add_compression_policy({{ .table|quoteLiteral }}, INTERVAL '2h')''',
	  ]

...where the defaults for the other templates would be automatically applied.

A very complex example for versions of TimescaleDB which don't support adding columns to compressed hypertables (v<2.1.0),
using views and unions to emulate the functionality, would be:

	[outputs.postgresql]
	  schema = "telegraf"
	  create_templates = [
	    '''CREATE TABLE {{ .table }} ({{ .allColumns }})''',
	    '''SELECT create_hypertable({{ .table|quoteLiteral }}, 'time', chunk_time_interval => INTERVAL '1d')''',
	    '''ALTER TABLE {{ .table }} SET (timescaledb.compress, timescaledb.compress_segmentby = 'tag_id')''',
	    '''SELECT add_compression_policy({{ .table|quoteLiteral }}, INTERVAL '2d')''',
	    '''CREATE VIEW {{ .table.WithSuffix "_data" }} AS
	         SELECT {{ .allColumns.Selectors | join "," }} FROM {{ .table }}''',
	    '''CREATE VIEW {{ .table.WithSchema "public" }} AS
	         SELECT time, {{ (.tagTable.Columns.Tags.Concat .allColumns.Fields).Identifiers | join "," }}
	         FROM {{ .table.WithSuffix "_data" }} t, {{ .tagTable }} tt
	         WHERE t.tag_id = tt.tag_id''',
	  ]
	  add_column_templates = [
	    '''ALTER TABLE {{ .table }} RENAME TO {{ (.table.WithSuffix "_" .table.Columns.Hash).WithSchema "" }}''',
	    '''ALTER VIEW {{ .table.WithSuffix "_data" }} RENAME TO {{ (.table.WithSuffix "_" .table.Columns.Hash "_data").WithSchema "" }}''',
	    '''DROP VIEW {{ .table.WithSchema "public" }}''',

	    '''CREATE TABLE {{ .table }} ({{ .allColumns }})''',
	    '''SELECT create_hypertable({{ .table|quoteLiteral }}, 'time', chunk_time_interval => INTERVAL '1d')''',
	    '''ALTER TABLE {{ .table }} SET (timescaledb.compress, timescaledb.compress_segmentby = 'tag_id')''',
	    '''SELECT add_compression_policy({{ .table|quoteLiteral }}, INTERVAL '2d')''',
	    '''CREATE VIEW {{ .table.WithSuffix "_data" }} AS
	         SELECT {{ .allColumns.Selectors | join "," }}
	         FROM {{ .table }}
	         UNION ALL
	         SELECT {{ (.allColumns.Union .table.Columns).Selectors | join "," }}
	         FROM {{ .table.WithSuffix "_" .table.Columns.Hash "_data" }}''',
	    '''CREATE VIEW {{ .table.WithSchema "public" }}
	         AS SELECT time, {{ (.tagTable.Columns.Tags.Concat .allColumns.Fields).Identifiers | join "," }}
	         FROM {{ .table.WithSuffix "_data" }} t, {{ .tagTable }} tt
	         WHERE t.tag_id = tt.tag_id''',
	  ]
*/
package sqltemplate

import (
	"bytes"
	"encoding/base32"
	"fmt"
	"hash/fnv"
	"strings"
	"text/template"
	"unsafe"

	"github.com/Masterminds/sprig"

	"github.com/influxdata/telegraf/plugins/outputs/postgresql/utils"
)

var templateFuncs = map[string]interface{}{
	"quoteIdentifier": QuoteIdentifier,
	"quoteLiteral":    QuoteLiteral,
}

func asString(obj interface{}) string {
	switch obj := obj.(type) {
	case string:
		return obj
	case []byte:
		return string(obj)
	case fmt.Stringer:
		return obj.String()
	default:
		return fmt.Sprintf("%v", obj)
	}
}

// QuoteIdentifier quotes the given string as a Postgres identifier (double-quotes the value).
//
// QuoteIdentifier is accessible within templates as 'quoteIdentifier'.
func QuoteIdentifier(name interface{}) string {
	return utils.QuoteIdentifier(asString(name))
}

// QuoteLiteral quotes the given string as a Postgres literal (single-quotes the value).
//
// QuoteLiteral is accessible within templates as 'quoteLiteral'.
func QuoteLiteral(str interface{}) string {
	return utils.QuoteLiteral(asString(str))
}

// Table is an object which represents a Postgres table.
type Table struct {
	Schema  string
	Name    string
	Columns Columns
}

func NewTable(schemaName, tableName string, columns []utils.Column) *Table {
	if tableName == "" {
		return nil
	}
	return &Table{
		Schema:  schemaName,
		Name:    tableName,
		Columns: NewColumns(columns),
	}
}

// String returns the table's fully qualified & quoted identifier (schema+table).
func (tbl *Table) String() string {
	return tbl.Identifier()
}

// Identifier returns the table's fully qualified & quoted identifier (schema+table).
//
// If schema is empty, it is omitted from the result.
func (tbl *Table) Identifier() string {
	if tbl.Schema == "" {
		return QuoteIdentifier(tbl.Name)
	}
	return QuoteIdentifier(tbl.Schema) + "." + QuoteIdentifier(tbl.Name)
}

// WithSchema returns a copy of the Table object, but with the schema replaced by the given value.
func (tbl *Table) WithSchema(name string) *Table {
	tblNew := &Table{}
	*tblNew = *tbl
	tblNew.Schema = name
	return tblNew
}

// WithName returns a copy of the Table object, but with the name replaced by the given value.
func (tbl *Table) WithName(name string) *Table {
	tblNew := &Table{}
	*tblNew = *tbl
	tblNew.Name = name
	return tblNew
}

// WithSuffix returns a copy of the Table object, but with the name suffixed with the given value.
func (tbl *Table) WithSuffix(suffixes ...string) *Table {
	tblNew := &Table{}
	*tblNew = *tbl
	tblNew.Name += strings.Join(suffixes, "")
	return tblNew
}

// A Column is an object which represents a Postgres column.
type Column utils.Column

// String returns the column's definition (as used in a CREATE TABLE statement), e.g.:
//
//	"my_column" bigint
func (tc Column) String() string {
	return tc.Definition()
}

// Definition returns the column's definition (as used in a CREATE TABLE statement), e.g.:
//
//	"my_column" bigint
func (tc Column) Definition() string {
	return tc.Identifier() + " " + tc.Type
}

// Identifier returns the column's quoted identifier.
func (tc Column) Identifier() string {
	return QuoteIdentifier(tc.Name)
}

// Selector returns the selector for the column. For most cases this is the same as Identifier.
// However, in some cases, such as a UNION, this may return a statement such as `NULL AS "foo"`.
func (tc Column) Selector() string {
	if tc.Type != "" {
		return tc.Identifier()
	}
	return "NULL AS " + tc.Identifier()
}

// IsTag returns true if the column is a tag column. Otherwise, false.
func (tc Column) IsTag() bool {
	return tc.Role == utils.TagColType
}

// IsField returns true if the column is a field column. Otherwise, false.
func (tc Column) IsField() bool {
	return tc.Role == utils.FieldColType
}

// Columns represents an ordered list of Column objects, with convenience methods for operating on the
// list.
type Columns []Column

func NewColumns(cols []utils.Column) Columns {
	tcols := make(Columns, 0, len(cols))
	for _, col := range cols {
		tcols = append(tcols, Column(col))
	}
	return tcols
}

// List returns the Columns object as a slice of Column.
func (cols Columns) List() []Column {
	return cols
}

// Definitions returns the list of column definitions.
func (cols Columns) Definitions() []string {
	defs := make([]string, 0, len(cols))
	for _, tc := range cols {
		defs = append(defs, tc.Definition())
	}
	return defs
}

// Identifiers returns the list of quoted column identifiers.
func (cols Columns) Identifiers() []string {
	idents := make([]string, 0, len(cols))
	for _, tc := range cols {
		idents = append(idents, tc.Identifier())
	}
	return idents
}

// Selectors returns the list of column selectors.
func (cols Columns) Selectors() []string {
	selectors := make([]string, 0, len(cols))
	for _, tc := range cols {
		selectors = append(selectors, tc.Selector())
	}
	return selectors
}

// String returns the comma delimited list of column identifiers.
func (cols Columns) String() string {
	colStrs := make([]string, 0, len(cols))
	for _, tc := range cols {
		colStrs = append(colStrs, tc.String())
	}
	return strings.Join(colStrs, ", ")
}

// Keys returns a Columns list of the columns which are not fields (e.g. time, tag_id, & tags).
func (cols Columns) Keys() Columns {
	var newCols []Column
	for _, tc := range cols {
		if tc.Role != utils.FieldColType {
			newCols = append(newCols, tc)
		}
	}
	return newCols
}

// Sorted returns a sorted copy of Columns.
//
// Columns are sorted so that they are in order as: [Time, Tags, Fields], with the columns within each group sorted alphabetically.
func (cols Columns) Sorted() Columns {
	newCols := make(Columns, 0, len(cols))
	newCols = append(newCols, cols...)
	(*utils.ColumnList)(unsafe.Pointer(&newCols)).Sort() //nolint:gosec // G103: Valid use of unsafe call to speed up sorting
	return newCols
}

// Concat returns a copy of Columns with the given tcsList appended to the end.
func (cols Columns) Concat(tcsList ...Columns) Columns {
	tcsNew := make(Columns, 0, len(cols)+len(tcsList))
	tcsNew = append(tcsNew, cols...)
	for _, tcs := range tcsList {
		tcsNew = append(tcsNew, tcs...)
	}
	return tcsNew
}

// Tags returns a Columns list of the columns which are tags.
func (cols Columns) Tags() Columns {
	var newCols []Column
	for _, tc := range cols {
		if tc.Role == utils.TagColType {
			newCols = append(newCols, tc)
		}
	}
	return newCols
}

// Fields returns a Columns list of the columns which are fields.
func (cols Columns) Fields() Columns {
	var newCols []Column
	for _, tc := range cols {
		if tc.Role == utils.FieldColType {
			newCols = append(newCols, tc)
		}
	}
	return newCols
}

// Hash returns a hash of the column names. The hash is a base-32 encoded string, up to 7 characters long with no padding.
//
// This can be useful as an identifier for supporting table renaming + unions in the case of non-modifiable tables.
func (cols Columns) Hash() string {
	hash := fnv.New32a()
	for _, tc := range cols.Sorted() {
		hash.Write([]byte(tc.Name))
		hash.Write([]byte{0})
	}
	return strings.ToLower(base32.StdEncoding.WithPadding(base32.NoPadding).EncodeToString(hash.Sum(nil)))
}

type Template template.Template

func (t *Template) UnmarshalText(text []byte) error {
	tmpl := template.New("")
	tmpl.Option("missingkey=error")
	tmpl.Funcs(templateFuncs)
	tmpl.Funcs(sprig.TxtFuncMap())
	tt, err := tmpl.Parse(string(text))
	if err != nil {
		return err
	}
	*t = Template(*tt)
	return nil
}

func (t *Template) Render(table *Table, newColumns []utils.Column, metricTable, tagTable *Table) ([]byte, error) {
	tcs := NewColumns(newColumns).Sorted()
	data := map[string]interface{}{
		"table":       table,
		"columns":     tcs,
		"allColumns":  tcs.Concat(table.Columns).Sorted(),
		"metricTable": metricTable,
		"tagTable":    tagTable,
	}

	buf := bytes.NewBuffer(nil)
	err := (*template.Template)(t).Execute(buf, data)
	return buf.Bytes(), err
}
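// Illustrative sketch (not part of the original diff): parsing and rendering a
// template by hand using the API above. The table and column values are made
// up for the example; the expected output follows from Identifier/Definition.
//
//	tmpl := &Template{}
//	if err := tmpl.UnmarshalText([]byte(`CREATE TABLE {{ .table }} ({{ .columns }})`)); err != nil {
//		panic(err)
//	}
//	cols := []utils.Column{{Name: "time", Type: "timestamp without time zone", Role: utils.TimeColType}}
//	tbl := NewTable("public", "cpu", nil)
//	sql, _ := tmpl.Render(tbl, cols, tbl, nil)
//	fmt.Println(string(sql))
//	// CREATE TABLE "public"."cpu" ("time" timestamp without time zone)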
448
plugins/outputs/postgresql/table_manager.go
Normal file
@ -0,0 +1,448 @@
package postgresql

import (
	"context"
	"fmt"
	"strings"
	"sync"

	"github.com/jackc/pgx/v4"

	"github.com/influxdata/telegraf/plugins/outputs/postgresql/sqltemplate"
	"github.com/influxdata/telegraf/plugins/outputs/postgresql/utils"
)

// This is an arbitrary constant value shared between multiple telegraf processes used for locking schema updates.
const schemaAdvisoryLockID int64 = 5705450890675909945

type tableState struct {
	name    string
	columns map[string]utils.Column
	sync.RWMutex
}

type TableManager struct {
	*Postgresql

	// map[tableName]map[columnName]utils.Column
	tables      map[string]*tableState
	tablesMutex sync.Mutex

	// Maps to track which column warnings/errors have already been logged
	loggedLongColumnWarn map[string]bool
	loggedLongColumnErr  map[string]bool
}

// NewTableManager returns an instance of the tables.Manager interface
// that can handle checking and updating the state of tables in the PG database.
func NewTableManager(postgresql *Postgresql) *TableManager {
	return &TableManager{
		Postgresql:           postgresql,
		tables:               make(map[string]*tableState),
		loggedLongColumnWarn: make(map[string]bool),
		loggedLongColumnErr:  make(map[string]bool),
	}
}

// ClearTableCache clears the table structure cache.
func (tm *TableManager) ClearTableCache() {
	tm.tablesMutex.Lock()
	for _, tbl := range tm.tables {
		tbl.Lock()
		tbl.columns = nil
		tbl.Unlock()
	}
	tm.tablesMutex.Unlock()

	if tm.tagsCache != nil {
		tm.tagsCache.Clear()
	}
}

func (tm *TableManager) table(name string) *tableState {
	tm.tablesMutex.Lock()
	tbl := tm.tables[name]
	if tbl == nil {
		tbl = &tableState{name: name}
		tm.tables[name] = tbl
	}
	tm.tablesMutex.Unlock()
	return tbl
}

// MatchSource scans through the metrics, determining what columns are needed for inserting, and ensuring the DB schema matches.
//
// If the schema does not match, and schema updates are disabled:
// If a field is missing from the DB, the field is omitted.
// If a tag is missing from the DB, the metric is dropped.
func (tm *TableManager) MatchSource(ctx context.Context, db dbh, rowSource *TableSource) error {
	metricTable := tm.table(rowSource.Name())
	var tagTable *tableState
	if tm.TagsAsForeignKeys {
		tagTable = tm.table(metricTable.name + tm.TagTableSuffix)

		missingCols, err := tm.EnsureStructure(
			ctx,
			db,
			tagTable,
			rowSource.TagTableColumns(),
			tm.TagTableCreateTemplates,
			tm.TagTableAddColumnTemplates,
			metricTable,
			tagTable,
		)
		if err != nil {
			if isTempError(err) {
				return err
			}
			tm.Postgresql.Logger.Errorf("Permanent error updating schema for %s: %v", tagTable.name, err)
		}

		if len(missingCols) > 0 {
			colDefs := make([]string, 0, len(missingCols))
			for _, col := range missingCols {
				if err := rowSource.DropColumn(col); err != nil {
					return fmt.Errorf("metric/table mismatch: Unable to omit field/column from %q: %w", tagTable.name, err)
				}
				colDefs = append(colDefs, col.Name+" "+col.Type)
			}
			tm.Logger.Errorf("Table %q is missing tag columns (dropping metrics): %s",
				tagTable.name,
				strings.Join(colDefs, ", "))
		}
	}

	missingCols, err := tm.EnsureStructure(
		ctx,
		db,
		metricTable,
		rowSource.MetricTableColumns(),
		tm.CreateTemplates,
		tm.AddColumnTemplates,
		metricTable,
		tagTable,
	)
	if err != nil {
		if isTempError(err) {
			return err
		}
		tm.Postgresql.Logger.Errorf("Permanent error updating schema for %s: %v", metricTable.name, err)
	}

	if len(missingCols) > 0 {
		colDefs := make([]string, 0, len(missingCols))
		for _, col := range missingCols {
			if err := rowSource.DropColumn(col); err != nil {
				return fmt.Errorf("metric/table mismatch: Unable to omit field/column from %q: %w", metricTable.name, err)
			}
			colDefs = append(colDefs, col.Name+" "+col.Type)
		}
		tm.Logger.Errorf("Table %q is missing columns (omitting fields): %s",
			metricTable.name,
			strings.Join(colDefs, ", "))
	}

	return nil
}

// EnsureStructure ensures that the table identified by tableName contains the provided columns.
//
// createTemplates and addColumnTemplates are the templates which are executed in the event of table create or alter
// (respectively).
// metricsTable and tagsTable are passed to the templates.
//
// If the table cannot be modified, the returned column list is the columns which are missing from the table. This
// includes when an error is returned.
//
//nolint:revive //argument-limit conditionally more arguments allowed
func (tm *TableManager) EnsureStructure(
	ctx context.Context,
	db dbh,
	tbl *tableState,
	columns []utils.Column,
	createTemplates, addColumnsTemplates []*sqltemplate.Template,
	metricsTable, tagsTable *tableState,
) ([]utils.Column, error) {
	// Sort so that:
	//   * When we create/alter the table the columns are in a sane order (telegraf gives us the fields in random order)
	//   * When we display errors about missing columns, the order is also sane, and consistent
	utils.ColumnList(columns).Sort()

	// The locking sequence is: rlock, read, runlock, wlock, read, read_db, wlock_db, read_db, write_db, wunlock_db, wunlock

	// rlock
	tbl.RLock()
	// read
	currCols := tbl.columns
	// runlock
	tbl.RUnlock()
	missingCols := diffMissingColumns(currCols, columns)
	if len(missingCols) == 0 {
		return nil, nil
	}

	// check that the missing columns are columns that can be added
	addColumns := make([]utils.Column, 0, len(missingCols))
	invalidColumns := make([]utils.Column, 0, len(missingCols))
	for i, col := range missingCols {
		if tm.ColumnNameLenLimit > 0 && len(col.Name) > tm.ColumnNameLenLimit {
			if !tm.loggedLongColumnWarn[col.Name] {
				tm.Postgresql.Logger.Warnf("Limiting too long column name: %q", col.Name)
				tm.loggedLongColumnWarn[col.Name] = true
			}
			col.Name = col.Name[:tm.ColumnNameLenLimit]
			missingCols[i] = col
		}
		if validateColumnName(col.Name) {
			addColumns = append(addColumns, col)
			continue
		}

		if col.Role == utils.TagColType {
			return nil, fmt.Errorf("column name too long: %q", col.Name)
		}
		if !tm.loggedLongColumnErr[col.Name] {
			tm.Postgresql.Logger.Errorf("Column name too long: %q", col.Name)
			tm.loggedLongColumnErr[col.Name] = true
		}
		invalidColumns = append(invalidColumns, col)
	}

	// wlock
	// We also need to lock the other table as it may be referenced by a template.
	// To prevent deadlock, the metric & tag table must always be locked in the same order: 1) Tag, 2) Metric
	if tbl == tagsTable {
		tagsTable.Lock()
		defer tagsTable.Unlock()

		metricsTable.RLock()
		defer metricsTable.RUnlock()
	} else {
		if tagsTable != nil {
			tagsTable.RLock()
			defer tagsTable.RUnlock()
		}

		metricsTable.Lock()
		defer metricsTable.Unlock()
	}

	// read
	currCols = tbl.columns
	addColumns = diffMissingColumns(currCols, addColumns)
	if len(addColumns) == 0 {
		return invalidColumns, nil
	}

	// read_db
	var err error
	if currCols, err = tm.getColumns(ctx, db, tbl.name); err != nil {
		return nil, err
	}
	tbl.columns = currCols
	addColumns = diffMissingColumns(currCols, addColumns)
	if len(addColumns) == 0 {
		return invalidColumns, nil
	}

	if len(currCols) == 0 && len(createTemplates) == 0 {
		// can't create
		return columns, nil
	}
	if len(currCols) != 0 && len(addColumnsTemplates) == 0 {
		// can't add
		return append(addColumns, invalidColumns...), nil
	}
	if len(currCols) == 0 && !tm.validateTableName(tbl.name) {
		return nil, fmt.Errorf("table name too long: %s", tbl.name)
	}

	// wlock_db
	tx, err := db.Begin(ctx)
	if err != nil {
		return append(addColumns, invalidColumns...), err
	}
	defer tx.Rollback(ctx) //nolint:errcheck // In case of failure during commit, "err" from commit will be returned
	// It's possible to have multiple telegraf processes, in which case we can't ensure they all lock tables in the same
	// order. So to prevent possible deadlocks, we have to have a single lock for all schema modifications.
	if _, err := tx.Exec(ctx, "SELECT pg_advisory_xact_lock($1)", schemaAdvisoryLockID); err != nil {
		return append(addColumns, invalidColumns...), err
	}

	// read_db
	if currCols, err = tm.getColumns(ctx, tx, tbl.name); err != nil {
		return nil, err
	}
	tbl.columns = currCols
	if currCols != nil {
		addColumns = diffMissingColumns(currCols, addColumns)
		if len(addColumns) == 0 {
			return invalidColumns, nil
		}
	}

	// write_db
	var tmpls []*sqltemplate.Template
	if len(currCols) == 0 {
		tmpls = createTemplates
	} else {
		tmpls = addColumnsTemplates
	}
	if err := tm.update(ctx, tx, tbl, tmpls, addColumns, metricsTable, tagsTable); err != nil {
		return append(addColumns, invalidColumns...), err
	}

	if currCols, err = tm.getColumns(ctx, tx, tbl.name); err != nil {
		return append(addColumns, invalidColumns...), err
	}

	if err := tx.Commit(ctx); err != nil {
		return append(addColumns, invalidColumns...), err
	}

	tbl.columns = currCols

	// wunlock_db (deferred)
	// wunlock (deferred)

	return invalidColumns, nil
}

func (tm *TableManager) getColumns(ctx context.Context, db dbh, name string) (map[string]utils.Column, error) {
	rows, err := db.Query(ctx, `
		SELECT
			column_name,
			CASE WHEN data_type='USER-DEFINED' THEN udt_name ELSE data_type END,
			col_description(format('%I.%I', table_schema, table_name)::regclass::oid, ordinal_position)
		FROM information_schema.columns
		WHERE table_schema = $1 and table_name = $2`, tm.Schema, name)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	cols := make(map[string]utils.Column)
	for rows.Next() {
		var colName, colType string
		desc := new(string)
		err := rows.Scan(&colName, &colType, &desc)
		if err != nil {
			return nil, err
		}

		role := utils.FieldColType
		switch colName {
		case tm.timeColumn.Name:
			role = utils.TimeColType
		case tm.tagIDColumn.Name:
			role = utils.TagsIDColType
		case tm.tagsJSONColumn.Name:
			role = utils.TagColType
		case tm.fieldsJSONColumn.Name:
			role = utils.FieldColType
		default:
			// We don't want to monopolize the column comment (preventing the user from storing other information there),
			// so just look at the first word
			if desc != nil {
				descWords := strings.Split(*desc, " ")
				if descWords[0] == "tag" {
					role = utils.TagColType
				}
			}
		}

		cols[colName] = utils.Column{
			Name: colName,
			Type: colType,
			Role: role,
		}
	}

	return cols, rows.Err()
}

//nolint:revive //argument-limit conditionally more arguments allowed
func (tm *TableManager) update(ctx context.Context,
	tx pgx.Tx,
	state *tableState,
	tmpls []*sqltemplate.Template,
	missingCols []utils.Column,
	metricsTable, tagsTable *tableState,
) error {
	tmplTable := sqltemplate.NewTable(tm.Schema, state.name, colMapToSlice(state.columns))
	metricsTmplTable := sqltemplate.NewTable(tm.Schema, metricsTable.name, colMapToSlice(metricsTable.columns))
	var tagsTmplTable *sqltemplate.Table
	if tagsTable != nil {
		tagsTmplTable = sqltemplate.NewTable(tm.Schema, tagsTable.name, colMapToSlice(tagsTable.columns))
	} else {
		tagsTmplTable = sqltemplate.NewTable("", "", nil)
	}

	for _, tmpl := range tmpls {
		sql, err := tmpl.Render(tmplTable, missingCols, metricsTmplTable, tagsTmplTable)
		if err != nil {
			return err
		}
		if _, err := tx.Exec(ctx, string(sql)); err != nil {
			return fmt.Errorf("executing %q: %w", sql, err)
		}
	}

	// We need to be able to determine the role of the column when reading the structure back (because of the templates).
	// For some columns we can determine this by the column name (time, tag_id, etc). However tags and fields can have any
	// name, and look the same. So we add a comment to tag columns, and through process of elimination what remains are
	// field columns.
	for _, col := range missingCols {
		if col.Role != utils.TagColType {
			continue
		}
		stmt := fmt.Sprintf("COMMENT ON COLUMN %s.%s IS 'tag'",
			tmplTable.String(), sqltemplate.QuoteIdentifier(col.Name))
		if _, err := tx.Exec(ctx, stmt); err != nil {
			return fmt.Errorf("setting column role comment: %w", err)
		}
	}

	return nil
}

const maxIdentifierLength = 63

func (tm *TableManager) validateTableName(name string) bool {
	if tm.Postgresql.TagsAsForeignKeys {
		return len([]byte(name))+len([]byte(tm.Postgresql.TagTableSuffix)) <= maxIdentifierLength
	}
	return len([]byte(name)) <= maxIdentifierLength
}

func validateColumnName(name string) bool {
	return len([]byte(name)) <= maxIdentifierLength
}

// diffMissingColumns filters srcColumns to the ones not present in dbColumns.
func diffMissingColumns(dbColumns map[string]utils.Column, srcColumns []utils.Column) []utils.Column {
	if len(dbColumns) == 0 {
		return srcColumns
	}

	var missingColumns []utils.Column
	for _, srcCol := range srcColumns {
		if _, ok := dbColumns[srcCol.Name]; !ok {
			missingColumns = append(missingColumns, srcCol)
		}
	}
	return missingColumns
}

func colMapToSlice(colMap map[string]utils.Column) []utils.Column {
	if colMap == nil {
		return nil
	}
	cols := make([]utils.Column, 0, len(colMap))
	for _, col := range colMap {
		cols = append(cols, col)
	}
	return cols
}
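// Illustrative sketch (not part of the original diff): how diffMissingColumns
// filters the source columns against what the DB already has. The column
// values are made up for the example.
//
//	dbCols := map[string]utils.Column{
//		"time": {Name: "time", Type: "timestamp without time zone", Role: utils.TimeColType},
//		"host": {Name: "host", Type: "text", Role: utils.TagColType},
//	}
//	srcCols := []utils.Column{
//		{Name: "host", Type: "text", Role: utils.TagColType},
//		{Name: "usage", Type: "bigint", Role: utils.FieldColType},
//	}
//	missing := diffMissingColumns(dbCols, srcCols)
//	// missing contains only the "usage" column, so EnsureStructure would
//	// attempt to add it via the add-column templates.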
515
plugins/outputs/postgresql/table_manager_test.go
Normal file
@ -0,0 +1,515 @@
package postgresql

import (
	"strings"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/outputs/postgresql/sqltemplate"
	"github.com/influxdata/telegraf/plugins/outputs/postgresql/utils"
)

func TestTableManagerIntegration_EnsureStructure(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	p, err := newPostgresqlTest(t)
	require.NoError(t, err)
	require.NoError(t, p.Connect())

	cols := []utils.Column{
		p.columnFromTag("foo", ""),
		p.columnFromField("baz", 0),
	}
	missingCols, err := p.tableManager.EnsureStructure(
		ctx,
		p.db,
		p.tableManager.table(t.Name()),
		cols,
		p.CreateTemplates,
		p.AddColumnTemplates,
		p.tableManager.table(t.Name()),
		nil,
	)
	require.NoError(t, err)
	require.Empty(t, missingCols)

	tblCols := p.tableManager.table(t.Name()).columns
	require.EqualValues(t, cols[0], tblCols["foo"])
	require.EqualValues(t, cols[1], tblCols["baz"])
}

func TestTableManagerIntegration_EnsureStructure_alter(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	p, err := newPostgresqlTest(t)
	require.NoError(t, err)
	require.NoError(t, p.Connect())

	cols := []utils.Column{
		p.columnFromTag("foo", ""),
		p.columnFromField("bar", 0),
	}
	_, err = p.tableManager.EnsureStructure(
		ctx,
		p.db,
		p.tableManager.table(t.Name()),
		cols,
		p.CreateTemplates,
		p.AddColumnTemplates,
		p.tableManager.table(t.Name()),
		nil,
	)
	require.NoError(t, err)

	cols = append(cols, p.columnFromField("baz", 0))
	missingCols, err := p.tableManager.EnsureStructure(
		ctx,
		p.db,
		p.tableManager.table(t.Name()),
		cols,
		p.CreateTemplates,
		p.AddColumnTemplates,
		p.tableManager.table(t.Name()),
		nil,
	)
	require.NoError(t, err)
	require.Empty(t, missingCols)

	tblCols := p.tableManager.table(t.Name()).columns
	require.EqualValues(t, cols[0], tblCols["foo"])
	require.EqualValues(t, cols[1], tblCols["bar"])
	require.EqualValues(t, cols[2], tblCols["baz"])
}

func TestTableManagerIntegration_EnsureStructure_overflowTableName(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	p, err := newPostgresqlTest(t)
	require.NoError(t, err)
	require.NoError(t, p.Connect())

	tbl := p.tableManager.table("ăăăăăăăăăăăăăăăăăăăăăăăăăăăăăăăă") // 32 2-byte unicode characters = 64 bytes
	cols := []utils.Column{
		p.columnFromField("foo", 0),
	}
	_, err = p.tableManager.EnsureStructure(
		ctx,
		p.db,
		tbl,
		cols,
		p.CreateTemplates,
		p.AddColumnTemplates,
		tbl,
		nil,
	)
	require.Error(t, err)
	require.Contains(t, err.Error(), "table name too long")
	require.False(t, isTempError(err))
}

func TestTableManagerIntegration_EnsureStructure_overflowTagName(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	p, err := newPostgresqlTest(t)
	require.NoError(t, err)
	require.NoError(t, p.Connect())

	tbl := p.tableManager.table(t.Name())
	cols := []utils.Column{
		p.columnFromTag("ăăăăăăăăăăăăăăăăăăăăăăăăăăăăăăăă", "a"), // 32 2-byte unicode characters = 64 bytes
		p.columnFromField("foo", 0),
	}
	_, err = p.tableManager.EnsureStructure(
		ctx,
		p.db,
		tbl,
		cols,
		p.CreateTemplates,
		p.AddColumnTemplates,
		tbl,
		nil,
	)
	require.Error(t, err)
	require.False(t, isTempError(err))
}

func TestTableManagerIntegration_EnsureStructure_overflowFieldName(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	p, err := newPostgresqlTest(t)
	require.NoError(t, err)
	require.NoError(t, p.Connect())

	tbl := p.tableManager.table(t.Name())
	cols := []utils.Column{
		p.columnFromField("foo", 0),
		p.columnFromField("ăăăăăăăăăăăăăăăăăăăăăăăăăăăăăăăă", 0),
	}
	missingCols, err := p.tableManager.EnsureStructure(
		ctx,
		p.db,
		tbl,
		cols,
		p.CreateTemplates,
		p.AddColumnTemplates,
		tbl,
		nil,
	)
	require.NoError(t, err)
	require.Len(t, missingCols, 1)
	require.Equal(t, cols[1], missingCols[0])
}

func TestTableManagerIntegration_getColumns(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	p, err := newPostgresqlTest(t)
	require.NoError(t, err)
	require.NoError(t, p.Connect())

	cols := []utils.Column{
		p.columnFromTag("foo", ""),
		p.columnFromField("baz", 0),
	}
	_, err = p.tableManager.EnsureStructure(
		ctx,
		p.db,
		p.tableManager.table(t.Name()),
		cols,
		p.CreateTemplates,
		p.AddColumnTemplates,
		p.tableManager.table(t.Name()),
		nil,
	)
	require.NoError(t, err)

	p.tableManager.ClearTableCache()
	require.Empty(t, p.tableManager.table(t.Name()).columns)

	curCols, err := p.tableManager.getColumns(ctx, p.db, t.Name())
	require.NoError(t, err)

	require.EqualValues(t, cols[0], curCols["foo"])
	require.EqualValues(t, cols[1], curCols["baz"])
}

func TestTableManagerIntegration_MatchSource(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	p, err := newPostgresqlTest(t)
	require.NoError(t, err)
	p.TagsAsForeignKeys = true
	require.NoError(t, p.Connect())

	metrics := []telegraf.Metric{
		newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 1}),
	}
	tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]

	require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc))
	require.Contains(t, p.tableManager.table(t.Name()+p.TagTableSuffix).columns, "tag")
	require.Contains(t, p.tableManager.table(t.Name()).columns, "a")
}

func TestTableManagerIntegration_MatchSource_UnsignedIntegers(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	p, err := newPostgresqlTest(t)
	require.NoError(t, err)
	p.Uint64Type = PgUint8
	require.NoError(t, p.Init())
	if err := p.Connect(); err != nil {
		if strings.Contains(err.Error(), "retrieving OID for uint8 data type") {
			t.Skip("pguint extension is not installed")
		}
		require.NoError(t, err)
	}

	metrics := []telegraf.Metric{
		newMetric(t, "", nil, MSI{"a": uint64(1)}),
	}
	tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]

	require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc))
	require.Equal(t, PgUint8, p.tableManager.table(t.Name()).columns["a"].Type)
}

func TestTableManagerIntegration_noCreateTable(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	p, err := newPostgresqlTest(t)
	require.NoError(t, err)
	p.CreateTemplates = nil
	require.NoError(t, p.Connect())

	metrics := []telegraf.Metric{
		newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 1}),
	}
	tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]

	require.Error(t, p.tableManager.MatchSource(ctx, p.db, tsrc))
}

func TestTableManagerIntegration_noCreateTagTable(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	p, err := newPostgresqlTest(t)
	require.NoError(t, err)
	p.TagTableCreateTemplates = nil
	p.TagsAsForeignKeys = true
	require.NoError(t, p.Connect())

	metrics := []telegraf.Metric{
		newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 1}),
	}
	tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]

	require.Error(t, p.tableManager.MatchSource(ctx, p.db, tsrc))
}

// Verify that TableManager updates & caches the DB table structure unless the incoming metric can't fit.
func TestTableManagerIntegration_cache(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	p, err := newPostgresqlTest(t)
	require.NoError(t, err)
	p.TagsAsForeignKeys = true
	require.NoError(t, p.Connect())

	metrics := []telegraf.Metric{
		newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 1}),
	}
	tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]

	require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc))
}

// Verify that when alter statements are disabled and a metric comes in with a new tag key, the tag is omitted.
func TestTableManagerIntegration_noAlterMissingTag(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	p, err := newPostgresqlTest(t)
	require.NoError(t, err)
	p.AddColumnTemplates = make([]*sqltemplate.Template, 0)
	require.NoError(t, p.Connect())

	metrics := []telegraf.Metric{
		newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 1}),
	}
	tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]
	require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc))

	metrics = []telegraf.Metric{
		newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 2}),
		newMetric(t, "", MSS{"tag": "foo", "bar": "baz"}, MSI{"a": 3}),
	}
	tsrc = NewTableSources(p.Postgresql, metrics)[t.Name()]
	require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc))
	require.NotContains(t, tsrc.ColumnNames(), "bar")
}

// Verify that when using foreign tags and alter statements are disabled and a metric comes in with a new tag key,
// the tag is omitted.
func TestTableManagerIntegration_noAlterMissingTagTableTag(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	p, err := newPostgresqlTest(t)
	require.NoError(t, err)
	p.TagsAsForeignKeys = true
	p.TagTableAddColumnTemplates = make([]*sqltemplate.Template, 0)
	require.NoError(t, p.Connect())

	metrics := []telegraf.Metric{
		newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 1}),
	}
	tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]
	require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc))

	metrics = []telegraf.Metric{
		newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 2}),
		newMetric(t, "", MSS{"tag": "foo", "bar": "baz"}, MSI{"a": 3}),
	}
	tsrc = NewTableSources(p.Postgresql, metrics)[t.Name()]
	ttsrc := NewTagTableSource(tsrc)
	require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc))
	require.NotContains(t, ttsrc.ColumnNames(), "bar")
}

// Verify that when using foreign tags, if the alter statements generate a permanent error and a metric comes in with
// a new tag key, the tag is omitted.
func TestTableManagerIntegration_badAlterTagTable(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	p, err := newPostgresqlTest(t)
	require.NoError(t, err)
	p.TagsAsForeignKeys = true
	tmpl := &sqltemplate.Template{}
	require.NoError(t, tmpl.UnmarshalText([]byte("bad")))
	p.TagTableAddColumnTemplates = []*sqltemplate.Template{tmpl}
	require.NoError(t, p.Connect())

	metrics := []telegraf.Metric{
		newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 1}),
	}
	tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]
	require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc))

	metrics = []telegraf.Metric{
		newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 2}),
		newMetric(t, "", MSS{"tag": "foo", "bar": "baz"}, MSI{"a": 3}),
	}
	tsrc = NewTableSources(p.Postgresql, metrics)[t.Name()]
	ttsrc := NewTagTableSource(tsrc)
	require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc))
	require.NotContains(t, ttsrc.ColumnNames(), "bar")
}

// Verify that when alter statements are disabled and a metric comes in with a new field key, the field is omitted.
func TestTableManagerIntegration_noAlterMissingField(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	p, err := newPostgresqlTest(t)
	require.NoError(t, err)
	p.AddColumnTemplates = make([]*sqltemplate.Template, 0)
	require.NoError(t, p.Connect())

	metrics := []telegraf.Metric{
		newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 1}),
	}
	tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]
	require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc))

	metrics = []telegraf.Metric{
		newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 2}),
		newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 3, "b": 3}),
	}
	tsrc = NewTableSources(p.Postgresql, metrics)[t.Name()]
	require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc))
	require.NotContains(t, tsrc.ColumnNames(), "b")
}

// Verify that when alter statements generate a permanent error and a metric comes in with a new field key, the field
// is omitted.
func TestTableManagerIntegration_badAlterField(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	p, err := newPostgresqlTest(t)
	require.NoError(t, err)
	tmpl := &sqltemplate.Template{}
	require.NoError(t, tmpl.UnmarshalText([]byte("bad")))
	p.AddColumnTemplates = []*sqltemplate.Template{tmpl}
	require.NoError(t, p.Connect())

	metrics := []telegraf.Metric{
		newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 1}),
	}
	tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]
	require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc))

	metrics = []telegraf.Metric{
		newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 2}),
		newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 3, "b": 3}),
	}
	tsrc = NewTableSources(p.Postgresql, metrics)[t.Name()]
	require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc))
	require.NotContains(t, tsrc.ColumnNames(), "b")
}

func TestTableManager_addColumnTemplates(t *testing.T) {
	p, err := newPostgresqlTest(t)
	require.NoError(t, err)
	p.TagsAsForeignKeys = true
	require.NoError(t, p.Connect())

	metrics := []telegraf.Metric{
		newMetric(t, "", MSS{"foo": "bar"}, MSI{"a": 1}),
	}
	tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]
	require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc))

	p, err = newPostgresqlTest(t)
	require.NoError(t, err)
	p.TagsAsForeignKeys = true
	tmpl := &sqltemplate.Template{}
	require.NoError(t, tmpl.UnmarshalText([]byte(`-- addColumnTemplate: {{ . }}`)))
	p.AddColumnTemplates = []*sqltemplate.Template{tmpl}

	require.NoError(t, p.Connect())

	metrics = []telegraf.Metric{
		newMetric(t, "", MSS{"pop": "tart"}, MSI{"a": 1, "b": 2}),
	}
	tsrc = NewTableSources(p.Postgresql, metrics)[t.Name()]
	require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc))
	p.Logger.Info("ok")

	expected := `CREATE TABLE "public"."TestTableManager_addColumnTemplates" ("time" timestamp without time zone, "tag_id" bigint, "a" bigint, "b" bigint)`
	stmtCount := 0
	for _, log := range p.Logger.Logs() {
		if strings.Contains(log.String(), expected) {
			stmtCount++
		}
	}

	require.Equal(t, 1, stmtCount)
}

func TestTableManager_TimeWithTimezone(t *testing.T) {
	p, err := newPostgresqlTest(t)
	require.NoError(t, err)
	p.TagsAsForeignKeys = true
	p.TimestampColumnType = "timestamp with time zone"
	require.NoError(t, p.Init())
	require.NoError(t, p.Connect())

	metrics := []telegraf.Metric{
		newMetric(t, "", MSS{"pop": "tart"}, MSI{"a": 1, "b": 2}),
	}
	tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]
	require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc))
	p.Logger.Info("ok")

	expected := `CREATE TABLE "public"."TestTableManager_TimeWithTimezone" ("time" timestamp with time zone, "tag_id" bigint, "a" bigint, "b" bigint)`
	stmtCount := 0
	for _, log := range p.Logger.Logs() {
		if strings.Contains(log.String(), expected) {
			stmtCount++
		}
	}

	require.Equal(t, 1, stmtCount)
}
436
plugins/outputs/postgresql/table_source.go
Normal file
@ -0,0 +1,436 @@
package postgresql
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/plugins/outputs/postgresql/utils"
|
||||
)
|
||||
|
||||
type columnList struct {
|
||||
columns []utils.Column
|
||||
indices map[string]int
|
||||
}
|
||||
|
||||
func newColumnList() *columnList {
|
||||
return &columnList{
|
||||
indices: make(map[string]int),
|
||||
}
|
||||
}
|
||||
|
||||
func (cl *columnList) Add(column utils.Column) {
|
||||
if _, ok := cl.indices[column.Name]; ok {
|
||||
return
|
||||
}
|
||||
cl.columns = append(cl.columns, column)
|
||||
cl.indices[column.Name] = len(cl.columns) - 1
|
||||
}
|
||||
|
||||
func (cl *columnList) Remove(name string) bool {
|
||||
idx, ok := cl.indices[name]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
cl.columns = append(cl.columns[:idx], cl.columns[idx+1:]...)
|
||||
delete(cl.indices, name)
|
||||
|
||||
for i, col := range cl.columns[idx:] {
|
||||
cl.indices[col.Name] = idx + i
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// TableSource satisfies pgx.CopyFromSource
|
||||
type TableSource struct {
|
||||
postgresql *Postgresql
|
||||
metrics []telegraf.Metric
|
||||
cursor int
|
||||
cursorValues []interface{}
|
||||
cursorError error
|
||||
// tagHashSalt is so that we can use a global tag cache for all tables. The salt is unique per table, and combined
|
||||
// with the tag ID when looked up in the cache.
|
||||
tagHashSalt int64
|
||||
|
||||
tagColumns *columnList
|
||||
// tagSets is the list of tag IDs to tag values in use within the TableSource. The position of each value in the list
|
||||
// corresponds to the key name in the tagColumns list.
|
||||
// This data is used to build out the foreign tag table when enabled.
|
||||
tagSets map[int64][]*telegraf.Tag
|
||||
|
||||
fieldColumns *columnList
|
||||
|
||||
droppedTagColumns []string
|
||||
}
|
||||
|
||||
func NewTableSources(p *Postgresql, metrics []telegraf.Metric) map[string]*TableSource {
|
||||
tableSources := make(map[string]*TableSource)
|
||||
|
||||
for _, m := range metrics {
|
||||
tsrc := tableSources[m.Name()]
|
||||
if tsrc == nil {
|
||||
tsrc = NewTableSource(p, m.Name())
|
||||
tableSources[m.Name()] = tsrc
|
||||
}
|
||||
tsrc.AddMetric(m)
|
||||
}
|
||||
|
||||
return tableSources
|
||||
}
|
||||
|
||||
func NewTableSource(postgresql *Postgresql, name string) *TableSource {
|
||||
h := fnv.New64a()
|
||||
h.Write([]byte(name))
|
||||
|
||||
tsrc := &TableSource{
|
||||
postgresql: postgresql,
|
||||
cursor: -1,
|
||||
tagSets: make(map[int64][]*telegraf.Tag),
|
||||
tagHashSalt: int64(h.Sum64()),
|
||||
}
|
||||
if !postgresql.TagsAsJsonb {
|
||||
tsrc.tagColumns = newColumnList()
|
||||
}
|
||||
if !postgresql.FieldsAsJsonb {
|
||||
tsrc.fieldColumns = newColumnList()
|
||||
}
|
||||
return tsrc
|
||||
}
|
||||
|
||||
func (tsrc *TableSource) AddMetric(metric telegraf.Metric) {
|
||||
if tsrc.postgresql.TagsAsForeignKeys {
|
||||
tagID := utils.GetTagID(metric)
|
||||
if _, ok := tsrc.tagSets[tagID]; !ok {
|
||||
tsrc.tagSets[tagID] = metric.TagList()
|
||||
}
|
||||
}
|
||||
|
||||
if !tsrc.postgresql.TagsAsJsonb {
|
||||
for _, t := range metric.TagList() {
|
||||
tsrc.tagColumns.Add(tsrc.postgresql.columnFromTag(t.Key, t.Value))
|
||||
}
|
||||
}
|
||||
|
||||
if !tsrc.postgresql.FieldsAsJsonb {
|
||||
for _, f := range metric.FieldList() {
|
||||
tsrc.fieldColumns.Add(tsrc.postgresql.columnFromField(f.Key, f.Value))
|
||||
}
|
||||
}
|
||||
|
||||
tsrc.metrics = append(tsrc.metrics, metric)
|
||||
}
|
||||
|
||||
func (tsrc *TableSource) Name() string {
|
||||
if len(tsrc.metrics) == 0 {
|
||||
return ""
|
||||
}
|
||||
return tsrc.metrics[0].Name()
|
||||
}
|
||||
|
||||
// TagColumns returns the superset of all tags of all metrics.
|
||||
func (tsrc *TableSource) TagColumns() []utils.Column {
|
||||
var cols []utils.Column
|
||||
|
||||
if tsrc.postgresql.TagsAsJsonb {
|
||||
cols = append(cols, tsrc.postgresql.tagsJSONColumn)
|
||||
} else {
|
||||
cols = append(cols, tsrc.tagColumns.columns...)
|
||||
}
|
||||
|
||||
return cols
|
||||
}
|
||||
|
||||
// FieldColumns returns the superset of all fields of all metrics.
|
||||
func (tsrc *TableSource) FieldColumns() []utils.Column {
|
||||
return tsrc.fieldColumns.columns
|
||||
}

// MetricTableColumns returns the full column list, including time, tag id or tags, and fields.
func (tsrc *TableSource) MetricTableColumns() []utils.Column {
	cols := []utils.Column{
		tsrc.postgresql.timeColumn,
	}

	if tsrc.postgresql.TagsAsForeignKeys {
		cols = append(cols, tsrc.postgresql.tagIDColumn)
	} else {
		cols = append(cols, tsrc.TagColumns()...)
	}

	if tsrc.postgresql.FieldsAsJsonb {
		cols = append(cols, tsrc.postgresql.fieldsJSONColumn)
	} else {
		cols = append(cols, tsrc.FieldColumns()...)
	}

	return cols
}
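
// For reference (derived from the method above), the metric-table layout under
// each configuration is:
//
//	tags_as_foreign_keys=false, tags_as_jsonb=false: time, <tag columns...>, <field columns...>
//	tags_as_foreign_keys=false, tags_as_jsonb=true:  time, tags (jsonb), <field columns...>
//	tags_as_foreign_keys=true:                       time, tag_id, <field columns...>
//
// With fields_as_jsonb=true the field columns collapse into a single
// fields (jsonb) column in all three layouts.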

func (tsrc *TableSource) TagTableColumns() []utils.Column {
	cols := []utils.Column{
		tsrc.postgresql.tagIDColumn,
	}

	cols = append(cols, tsrc.TagColumns()...)

	return cols
}

func (tsrc *TableSource) ColumnNames() []string {
	cols := tsrc.MetricTableColumns()
	names := make([]string, 0, len(cols))
	for _, col := range cols {
		names = append(names, col.Name)
	}
	return names
}

// DropColumn drops the specified column.
// If column is a tag column, any metrics containing the tag will be skipped.
// If column is a field column, any metrics containing the field will have it omitted.
func (tsrc *TableSource) DropColumn(col utils.Column) error {
	switch col.Role {
	case utils.TagColType:
		return tsrc.dropTagColumn(col)
	case utils.FieldColType:
		return tsrc.dropFieldColumn(col)
	case utils.TimeColType, utils.TagsIDColType:
		return fmt.Errorf("critical column %q", col.Name)
	default:
		return fmt.Errorf("internal error: unknown column %q", col.Name)
	}
}

// Drops the tag column from conversion. Any metrics containing this tag will be skipped.
func (tsrc *TableSource) dropTagColumn(col utils.Column) error {
	if col.Role != utils.TagColType || tsrc.postgresql.TagsAsJsonb {
		return fmt.Errorf("internal error: tried to perform an invalid tag drop. measurement=%s tag=%s", tsrc.Name(), col.Name)
	}
	tsrc.droppedTagColumns = append(tsrc.droppedTagColumns, col.Name)

	if !tsrc.tagColumns.Remove(col.Name) {
		return nil
	}

	for setID, set := range tsrc.tagSets {
		for _, tag := range set {
			if tag.Key == col.Name {
				// The tag is defined, so drop the whole set
				delete(tsrc.tagSets, setID)
				break
			}
		}
	}
	return nil
}

// Drops the field column from conversion. Any metrics containing this field will have the field omitted.
func (tsrc *TableSource) dropFieldColumn(col utils.Column) error {
	if col.Role != utils.FieldColType || tsrc.postgresql.FieldsAsJsonb {
		return fmt.Errorf("internal error: tried to perform an invalid field drop. measurement=%s field=%s", tsrc.Name(), col.Name)
	}

	tsrc.fieldColumns.Remove(col.Name)
	return nil
}

func (tsrc *TableSource) Next() bool {
	for {
		if tsrc.cursor+1 >= len(tsrc.metrics) {
			tsrc.cursorValues = nil
			tsrc.cursorError = nil
			return false
		}
		tsrc.cursor++

		tsrc.cursorValues, tsrc.cursorError = tsrc.getValues()
		if tsrc.cursorValues != nil || tsrc.cursorError != nil {
			return true
		}
	}
}

func (tsrc *TableSource) Reset() {
	tsrc.cursor = -1
}

// getValues calculates the values for the metric at the cursor position.
// If the metric cannot be emitted, such as due to dropped tags, or all fields dropped, the return value is nil.
func (tsrc *TableSource) getValues() ([]interface{}, error) {
	metric := tsrc.metrics[tsrc.cursor]

	values := []interface{}{
		metric.Time().UTC(),
	}

	if !tsrc.postgresql.TagsAsForeignKeys {
		if !tsrc.postgresql.TagsAsJsonb {
			// tags_as_foreign_keys=false, tags_as_jsonb=false
			tagValues := make([]interface{}, len(tsrc.tagColumns.columns))
			for _, tag := range metric.TagList() {
				tagPos, ok := tsrc.tagColumns.indices[tag.Key]
				if !ok {
					// tag has been dropped; we can't emit, or we risk collision with another metric
					return nil, nil
				}
				tagValues[tagPos] = tag.Value
			}
			values = append(values, tagValues...)
		} else {
			// tags_as_foreign_keys=false, tags_as_jsonb=true
			values = append(values, utils.TagListToJSON(metric.TagList()))
		}
	} else {
		// tags_as_foreign_keys=true
		tagID := utils.GetTagID(metric)
		if tsrc.postgresql.ForeignTagConstraint {
			if _, ok := tsrc.tagSets[tagID]; !ok {
				// tag set has been dropped
				return nil, nil
			}
		}
		values = append(values, tagID)
	}

	if !tsrc.postgresql.FieldsAsJsonb {
		// fields_as_jsonb=false
		fieldValues := make([]interface{}, len(tsrc.fieldColumns.columns))
		fieldsEmpty := true
		for _, field := range metric.FieldList() {
			// we might have dropped the field due to the table missing the column & schema updates being turned off
			if fPos, ok := tsrc.fieldColumns.indices[field.Key]; ok {
				fieldValues[fPos] = field.Value
				fieldsEmpty = false
			}
		}
		if fieldsEmpty {
			// all fields have been dropped; don't emit a metric with just tags and no fields
			return nil, nil
		}
		values = append(values, fieldValues...)
	} else {
		// fields_as_jsonb=true
		value, err := utils.FieldListToJSON(metric.FieldList())
		if err != nil {
			return nil, err
		}
		values = append(values, value)
	}

	return values, nil
}
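
// Worked example (values assumed for illustration): with the default
// configuration and the columns [time, host, usage], a metric
// cpu,host=a usage=0.5 yields [<timestamp in UTC>, "a", 0.5], in the same
// order that ColumnNames() reports.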

func (tsrc *TableSource) Values() ([]interface{}, error) {
	return tsrc.cursorValues, tsrc.cursorError
}

func (*TableSource) Err() error {
	return nil
}

type TagTableSource struct {
	*TableSource
	tagIDs []int64

	cursor       int
	cursorValues []interface{}
	cursorError  error
}

func NewTagTableSource(tsrc *TableSource) *TagTableSource {
	ttsrc := &TagTableSource{
		TableSource: tsrc,
		cursor:      -1,
	}

	ttsrc.tagIDs = make([]int64, 0, len(tsrc.tagSets))
	for tagID := range tsrc.tagSets {
		ttsrc.tagIDs = append(ttsrc.tagIDs, tagID)
	}

	return ttsrc
}

func (ttsrc *TagTableSource) Name() string {
	return ttsrc.TableSource.Name() + ttsrc.postgresql.TagTableSuffix
}

func (ttsrc *TagTableSource) cacheCheck(tagID int64) bool {
	// Adding the 2 hashes is good enough. It's not a perfect solution, but given that we're operating in an int64
	// space, the risk of collision is extremely small.
	key := ttsrc.tagHashSalt + tagID
	_, err := ttsrc.postgresql.tagsCache.GetInt(key)
	return err == nil
}

func (ttsrc *TagTableSource) cacheTouch(tagID int64) {
	key := ttsrc.tagHashSalt + tagID
	//nolint:errcheck // unable to propagate error
	ttsrc.postgresql.tagsCache.SetInt(key, nil, 0)
}
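
// Illustrative note (assumes postgresql.tagsCache is a *freecache.Cache, as
// the tests below construct): freecache keys GetInt/SetInt by an int64, so the
// salted tag ID is usable directly, and a nil value with expireSeconds=0
// stores a non-expiring presence marker:
//
//	cache := freecache.NewCache(5 * 1024 * 1024)
//	_ = cache.SetInt(key, nil, 0) // mark tag ID as already inserted
//	_, err := cache.GetInt(key)   // err != nil while the key is absent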

func (ttsrc *TagTableSource) ColumnNames() []string {
	cols := ttsrc.TagTableColumns()
	names := make([]string, 0, len(cols))
	for _, col := range cols {
		names = append(names, col.Name)
	}
	return names
}

func (ttsrc *TagTableSource) Next() bool {
	for {
		if ttsrc.cursor+1 >= len(ttsrc.tagIDs) {
			ttsrc.cursorValues = nil
			return false
		}
		ttsrc.cursor++

		if ttsrc.cacheCheck(ttsrc.tagIDs[ttsrc.cursor]) {
			// tag ID already inserted
			continue
		}

		ttsrc.cursorValues = ttsrc.getValues()
		if ttsrc.cursorValues != nil {
			return true
		}
	}
}

func (ttsrc *TagTableSource) Reset() {
	ttsrc.cursor = -1
}

func (ttsrc *TagTableSource) getValues() []interface{} {
	tagID := ttsrc.tagIDs[ttsrc.cursor]
	tagSet := ttsrc.tagSets[tagID]

	var values []interface{}
	if !ttsrc.postgresql.TagsAsJsonb {
		values = make([]interface{}, len(ttsrc.TableSource.tagColumns.indices)+1)
		for _, tag := range tagSet {
			values[ttsrc.TableSource.tagColumns.indices[tag.Key]+1] = tag.Value // +1 to account for the tag_id column
		}
	} else {
		values = make([]interface{}, 2)
		values[1] = utils.TagListToJSON(tagSet)
	}
	values[0] = tagID

	return values
}

func (ttsrc *TagTableSource) Values() ([]interface{}, error) {
	return ttsrc.cursorValues, ttsrc.cursorError
}

func (ttsrc *TagTableSource) UpdateCache() {
	for _, tagID := range ttsrc.tagIDs {
		ttsrc.cacheTouch(tagID)
	}
}

func (*TagTableSource) Err() error {
	return nil
}

326
plugins/outputs/postgresql/table_source_test.go
Normal file

@@ -0,0 +1,326 @@
package postgresql

import (
	"encoding/json"
	"testing"
	"time"

	"github.com/coocood/freecache"
	"github.com/jackc/pgx/v4"
	"github.com/stretchr/testify/require"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/outputs/postgresql/utils"
)

func TestTableSource(_ *testing.T) {
}

type source interface {
	pgx.CopyFromSource
	ColumnNames() []string
}

func nextSrcRow(src source) MSI {
	if !src.Next() {
		return nil
	}
	row := MSI{}
	vals, err := src.Values()
	if err != nil {
		panic(err)
	}
	for i, name := range src.ColumnNames() {
		row[name] = vals[i]
	}
	return row
}
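
// For context (an assumption based on how these tests read, not shown in this
// diff): MSI and MSS are shorthand aliases from the package's test helpers,
// along the lines of:
//
//	type MSI = map[string]interface{}
//	type MSS = map[string]string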

func TestTableSourceIntegration_tagJSONB(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	p, err := newPostgresqlTest(t)
	require.NoError(t, err)
	p.TagsAsJsonb = true

	metrics := []telegraf.Metric{
		newMetric(t, "", MSS{"a": "one", "b": "two"}, MSI{"v": 1}),
	}

	tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]
	row := nextSrcRow(tsrc)
	require.NoError(t, tsrc.Err())

	require.IsType(t, time.Time{}, row["time"])
	var tags MSI
	require.NoError(t, json.Unmarshal(row["tags"].([]byte), &tags))
	require.EqualValues(t, MSI{"a": "one", "b": "two"}, tags)
	require.EqualValues(t, 1, row["v"])
}

func TestTableSourceIntegration_tagTable(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	p, err := newPostgresqlTest(t)
	require.NoError(t, err)
	p.TagsAsForeignKeys = true
	p.tagsCache = freecache.NewCache(5 * 1024 * 1024)

	metrics := []telegraf.Metric{
		newMetric(t, "", MSS{"a": "one", "b": "two"}, MSI{"v": 1}),
	}

	tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]
	ttsrc := NewTagTableSource(tsrc)
	ttrow := nextSrcRow(ttsrc)
	require.EqualValues(t, "one", ttrow["a"])
	require.EqualValues(t, "two", ttrow["b"])

	row := nextSrcRow(tsrc)
	require.Equal(t, row["tag_id"], ttrow["tag_id"])
}

func TestTableSourceIntegration_tagTableJSONB(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	p, err := newPostgresqlTest(t)
	require.NoError(t, err)
	p.TagsAsForeignKeys = true
	p.TagsAsJsonb = true
	p.tagsCache = freecache.NewCache(5 * 1024 * 1024)

	metrics := []telegraf.Metric{
		newMetric(t, "", MSS{"a": "one", "b": "two"}, MSI{"v": 1}),
	}

	tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]
	ttsrc := NewTagTableSource(tsrc)
	ttrow := nextSrcRow(ttsrc)
	var tags MSI
	require.NoError(t, json.Unmarshal(ttrow["tags"].([]byte), &tags))
	require.EqualValues(t, MSI{"a": "one", "b": "two"}, tags)
}

func TestTableSourceIntegration_fieldsJSONB(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	p, err := newPostgresqlTest(t)
	require.NoError(t, err)
	p.FieldsAsJsonb = true

	metrics := []telegraf.Metric{
		newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 1, "b": 2}),
	}

	tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]
	row := nextSrcRow(tsrc)
	var fields MSI
	require.NoError(t, json.Unmarshal(row["fields"].([]byte), &fields))
	// json unmarshals numbers as floats
	require.EqualValues(t, MSI{"a": 1.0, "b": 2.0}, fields)
}

// TagsAsForeignKeys=false
// Test that when a tag column is dropped, all metrics containing that tag are dropped.
func TestTableSourceIntegration_DropColumn_tag(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	p, err := newPostgresqlTest(t)
	require.NoError(t, err)

	metrics := []telegraf.Metric{
		newMetric(t, "", MSS{"a": "one", "b": "two"}, MSI{"v": 1}),
		newMetric(t, "", MSS{"a": "one"}, MSI{"v": 2}),
	}
	tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]

	// Drop column "b"
	var col utils.Column
	for _, c := range tsrc.TagColumns() {
		if c.Name == "b" {
			col = c
			break
		}
	}
	require.NoError(t, tsrc.DropColumn(col))

	row := nextSrcRow(tsrc)
	require.EqualValues(t, "one", row["a"])
	require.EqualValues(t, 2, row["v"])
	require.False(t, tsrc.Next())
}

// TagsAsForeignKeys=true, ForeignTagConstraint=true
// Test that when a tag column is dropped, all metrics containing that tag are dropped.
func TestTableSourceIntegration_DropColumn_tag_fkTrue_fcTrue(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	p, err := newPostgresqlTest(t)
	require.NoError(t, err)
	p.TagsAsForeignKeys = true
	p.ForeignTagConstraint = true
	p.tagsCache = freecache.NewCache(5 * 1024 * 1024)

	metrics := []telegraf.Metric{
		newMetric(t, "", MSS{"a": "one", "b": "two"}, MSI{"v": 1}),
		newMetric(t, "", MSS{"a": "one"}, MSI{"v": 2}),
	}
	tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]

	// Drop column "b"
	var col utils.Column
	for _, c := range tsrc.TagColumns() {
		if c.Name == "b" {
			col = c
			break
		}
	}
	require.NoError(t, tsrc.DropColumn(col))

	ttsrc := NewTagTableSource(tsrc)
	row := nextSrcRow(ttsrc)
	require.EqualValues(t, "one", row["a"])
	require.False(t, ttsrc.Next())

	row = nextSrcRow(tsrc)
	require.EqualValues(t, 2, row["v"])
	require.False(t, tsrc.Next())
}

// TagsAsForeignKeys=true, ForeignTagConstraint=false
// Test that when a tag column is dropped, metrics are still added while the tag is not.
func TestTableSourceIntegration_DropColumn_tag_fkTrue_fcFalse(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	p, err := newPostgresqlTest(t)
	require.NoError(t, err)
	p.TagsAsForeignKeys = true
	p.ForeignTagConstraint = false
	p.tagsCache = freecache.NewCache(5 * 1024 * 1024)

	metrics := []telegraf.Metric{
		newMetric(t, "", MSS{"a": "one", "b": "two"}, MSI{"v": 1}),
		newMetric(t, "", MSS{"a": "one"}, MSI{"v": 2}),
	}
	tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]

	// Drop column "b"
	var col utils.Column
	for _, c := range tsrc.TagColumns() {
		if c.Name == "b" {
			col = c
			break
		}
	}
	require.NoError(t, tsrc.DropColumn(col))

	ttsrc := NewTagTableSource(tsrc)
	row := nextSrcRow(ttsrc)
	require.EqualValues(t, "one", row["a"])
	require.False(t, ttsrc.Next())

	row = nextSrcRow(tsrc)
	require.EqualValues(t, 1, row["v"])
	row = nextSrcRow(tsrc)
	require.EqualValues(t, 2, row["v"])
}

// Test that when a field is dropped, only the field is dropped, and all rows remain, unless it was the only field.
func TestTableSourceIntegration_DropColumn_field(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	p, err := newPostgresqlTest(t)
	require.NoError(t, err)

	metrics := []telegraf.Metric{
		newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 1}),
		newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 2, "b": 3}),
	}
	tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]

	// Drop column "a"
	var col utils.Column
	for _, c := range tsrc.FieldColumns() {
		if c.Name == "a" {
			col = c
			break
		}
	}
	require.NoError(t, tsrc.DropColumn(col))

	row := nextSrcRow(tsrc)
	require.EqualValues(t, "foo", row["tag"])
	require.EqualValues(t, 3, row["b"])
	require.False(t, tsrc.Next())
}

func TestTableSourceIntegration_InconsistentTags(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	p, err := newPostgresqlTest(t)
	require.NoError(t, err)

	metrics := []telegraf.Metric{
		newMetric(t, "", MSS{"a": "1"}, MSI{"b": 2}),
		newMetric(t, "", MSS{"c": "3"}, MSI{"d": 4}),
	}
	tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]

	trow := nextSrcRow(tsrc)
	require.EqualValues(t, "1", trow["a"])
	require.Nil(t, trow["c"])

	trow = nextSrcRow(tsrc)
	require.Nil(t, trow["a"])
	require.EqualValues(t, "3", trow["c"])
}

func TestTagTableSourceIntegration_InconsistentTags(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	p, err := newPostgresqlTest(t)
	require.NoError(t, err)
	p.TagsAsForeignKeys = true
	p.tagsCache = freecache.NewCache(5 * 1024 * 1024)

	metrics := []telegraf.Metric{
		newMetric(t, "", MSS{"a": "1"}, MSI{"b": 2}),
		newMetric(t, "", MSS{"c": "3"}, MSI{"d": 4}),
	}
	tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]
	ttsrc := NewTagTableSource(tsrc)

	// ttsrc emits rows in non-deterministic order
	expected := []MSI{
		{"a": "1", "c": nil},
		{"a": nil, "c": "3"},
	}

	var actual []MSI
	for row := nextSrcRow(ttsrc); row != nil; row = nextSrcRow(ttsrc) {
		delete(row, "tag_id")
		actual = append(actual, row)
	}

	require.ElementsMatch(t, expected, actual)
}

53
plugins/outputs/postgresql/utils/column.go
Normal file

@@ -0,0 +1,53 @@
package utils

// This is split out from the 'postgresql' package as it's depended upon by both the 'postgresql' and
// 'postgresql/template' packages.

import (
	"sort"
	"strings"
)

// ColumnRole specifies the role of a column in a metric.
// It helps map the columns to the DB.
type ColumnRole int

const (
	TimeColType ColumnRole = iota + 1
	TagsIDColType
	TagColType
	FieldColType
)

type Column struct {
	Name string
	// the data type each column should have in the DB; used when checking
	// whether the schema matches or needs updates
	Type string
	// the role each column has; helps properly map the metric to the DB
	Role ColumnRole
}

// ColumnList implements sort.Interface.
// Columns are sorted first into groups of time, tag_id, tags, fields, and then alphabetically within
// each group.
type ColumnList []Column

func (cl ColumnList) Len() int {
	return len(cl)
}

func (cl ColumnList) Less(i, j int) bool {
	if cl[i].Role != cl[j].Role {
		return cl[i].Role < cl[j].Role
	}
	return strings.ToLower(cl[i].Name) < strings.ToLower(cl[j].Name)
}

func (cl ColumnList) Swap(i, j int) {
	cl[i], cl[j] = cl[j], cl[i]
}

func (cl ColumnList) Sort() {
	sort.Sort(cl)
}
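
// Illustrative example (column set assumed): sorting orders roles as
// time < tag_id < tags < fields, then alphabetically within a role:
//
//	cl := ColumnList{
//		{Name: "usage", Role: FieldColType},
//		{Name: "host", Role: TagColType},
//		{Name: "time", Role: TimeColType},
//	}
//	cl.Sort() // -> time, host, usage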

117
plugins/outputs/postgresql/utils/utils.go
Normal file

@@ -0,0 +1,117 @@
package utils

import (
	"context"
	"encoding/json"
	"hash/fnv"
	"strings"
	"sync/atomic"

	"github.com/jackc/pgx/v4"

	"github.com/influxdata/telegraf"
)

func TagListToJSON(tagList []*telegraf.Tag) []byte {
	tags := make(map[string]string, len(tagList))
	for _, tag := range tagList {
		tags[tag.Key] = tag.Value
	}
	//nolint:errcheck // unable to propagate error
	bs, _ := json.Marshal(tags)
	return bs
}

func FieldListToJSON(fieldList []*telegraf.Field) ([]byte, error) {
	fields := make(map[string]interface{}, len(fieldList))
	for _, field := range fieldList {
		fields[field.Key] = field.Value
	}
	return json.Marshal(fields)
}

// QuoteIdentifier returns a sanitized string safe to use in SQL as an identifier
func QuoteIdentifier(name string) string {
	return pgx.Identifier{name}.Sanitize()
}

// QuoteLiteral returns a sanitized string safe to use in SQL as a string literal
func QuoteLiteral(name string) string {
	return "'" + strings.Replace(name, "'", "''", -1) + "'"
}

// FullTableName returns a sanitized table name with its schema (if supplied)
func FullTableName(schema, name string) pgx.Identifier {
	if schema != "" {
		return pgx.Identifier{schema, name}
	}

	return pgx.Identifier{name}
}
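
// Illustrative examples (outputs follow from pgx.Identifier.Sanitize and the
// quote-doubling above):
//
//	QuoteIdentifier(`my"table`)    // `"my""table"`
//	QuoteLiteral("it's")           // `'it''s'`
//	FullTableName("public", "cpu") // sanitizes to `"public"."cpu"`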

// PGXLogger makes telegraf.Logger compatible with pgx.Logger
type PGXLogger struct {
	telegraf.Logger
}

func (l PGXLogger) Log(_ context.Context, level pgx.LogLevel, msg string, data map[string]interface{}) {
	switch level {
	case pgx.LogLevelError:
		l.Errorf("PG %s - %+v", msg, data)
	case pgx.LogLevelWarn:
		l.Warnf("PG %s - %+v", msg, data)
	case pgx.LogLevelInfo, pgx.LogLevelNone:
		l.Infof("PG %s - %+v", msg, data)
	case pgx.LogLevelDebug, pgx.LogLevelTrace:
		l.Debugf("PG %s - %+v", msg, data)
	default:
		l.Debugf("PG %s - %+v", msg, data)
	}
}
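
// Illustrative sketch (assumed wiring, not from the upstream file): the logger
// can be attached to a pgx connection config before connecting:
//
//	cfg, err := pgx.ParseConfig(dsn)
//	if err != nil {
//		return err
//	}
//	cfg.Logger = PGXLogger{logger} // logger is a telegraf.Logger
//	conn, err := pgx.ConnectConfig(ctx, cfg)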

func GetTagID(metric telegraf.Metric) int64 {
	hash := fnv.New64a()
	for _, tag := range metric.TagList() {
		hash.Write([]byte(tag.Key))
		hash.Write([]byte{0})
		hash.Write([]byte(tag.Value))
		hash.Write([]byte{0})
	}
	// Convert to int64, as PostgreSQL does not support uint64
	return int64(hash.Sum64())
}
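
// Worked example (tag values assumed): for tags {host: "a", region: "b"} the
// hash input is the byte sequence "host\x00a\x00region\x00b\x00". The \x00
// separators keep adjacent keys and values from colliding, e.g. ("ab", "c")
// and ("a", "bc") hash differently.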

// WaitGroup is similar to sync.WaitGroup, but allows interruptible waiting (e.g. a timeout).
type WaitGroup struct {
	count int32
	done  chan struct{}
}

func NewWaitGroup() *WaitGroup {
	return &WaitGroup{
		done: make(chan struct{}),
	}
}

func (wg *WaitGroup) Add(i int32) {
	select {
	case <-wg.done:
		panic("use of an already-done WaitGroup")
	default:
	}
	atomic.AddInt32(&wg.count, i)
}

func (wg *WaitGroup) Done() {
	i := atomic.AddInt32(&wg.count, -1)
	if i == 0 {
		close(wg.done)
	}
	if i < 0 {
		panic("too many Done() calls")
	}
}

func (wg *WaitGroup) C() <-chan struct{} {
	return wg.done
}
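
// Illustrative sketch (not from the upstream file): C() is what makes the wait
// interruptible, e.g. bounding it with a timeout:
//
//	wg := NewWaitGroup()
//	wg.Add(1)
//	go func() { defer wg.Done(); work() }()
//	select {
//	case <-wg.C():
//		// all workers finished
//	case <-time.After(5 * time.Second):
//		// gave up waiting
//	}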