
Adding upstream version 1.34.4.

Signed-off-by: Daniel Baumann <daniel@debian.org>
Daniel Baumann 2025-05-24 07:26:29 +02:00
parent e393c3af3f
commit 4978089aab
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
4963 changed files with 677545 additions and 0 deletions

@@ -0,0 +1,240 @@
# ClickHouse Input Plugin
This plugin gathers statistics data from a [ClickHouse server][clickhouse].
Users on ClickHouse Cloud will not see the ZooKeeper metrics, as they may not
have permission to query those tables.
⭐ Telegraf v1.14.0
🏷️ server
💻 all
[clickhouse]: https://github.com/ClickHouse/ClickHouse
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used to
modify metrics, tags, and fields, create aliases, or configure ordering, etc.
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
## Configuration
```toml @sample.conf
# Read metrics from one or many ClickHouse servers
[[inputs.clickhouse]]
## Username for authorization on ClickHouse server
username = "default"
## Password for authorization on ClickHouse server
# password = ""
## HTTP(s) timeout while getting metrics values
## The timeout includes connection time, any redirects, and reading the
## response body.
# timeout = "5s"
## List of servers for metrics scraping
## Metrics are scraped via the HTTP(s) ClickHouse interface
## https://clickhouse.tech/docs/en/interfaces/http/
servers = ["http://127.0.0.1:8123"]
## Server Variant
## When set to "managed", some queries are excluded from being run. This is
## useful for instances hosted in ClickHouse Cloud where certain tables are
## not available.
# variant = "self-hosted"
## If "auto_discovery"" is "true" plugin tries to connect to all servers
## available in the cluster with using same "user:password" described in
## "user" and "password" parameters and get this server hostname list from
## "system.clusters" table. See
## - https://clickhouse.tech/docs/en/operations/system_tables/#system-clusters
## - https://clickhouse.tech/docs/en/operations/server_settings/settings/#server_settings_remote_servers
## - https://clickhouse.tech/docs/en/operations/table_engines/distributed/
## - https://clickhouse.tech/docs/en/operations/table_engines/replication/#creating-replicated-tables
# auto_discovery = true
## Filter cluster names in "system.clusters" when "auto_discovery" is "true".
## When this filter is present, a "WHERE cluster IN (...)" filter is applied.
## Use only full cluster names here; regexp and glob filters are not
## allowed. For example, for "/etc/clickhouse-server/config.d/remote.xml":
## <yandex>
## <remote_servers>
## <my-own-cluster>
## <shard>
## <replica><host>clickhouse-ru-1.local</host><port>9000</port></replica>
## <replica><host>clickhouse-ru-2.local</host><port>9000</port></replica>
## </shard>
## <shard>
## <replica><host>clickhouse-eu-1.local</host><port>9000</port></replica>
## <replica><host>clickhouse-eu-2.local</host><port>9000</port></replica>
## </shard>
## </my-own-cluster>
## </remote_servers>
##
## </yandex>
##
## example: cluster_include = ["my-own-cluster"]
# cluster_include = []
## Filter cluster names in "system.clusters" when "auto_discovery" is
## "true". When this filter is present, a "WHERE cluster NOT IN (...)"
## filter is applied.
## example: cluster_exclude = ["my-internal-not-discovered-cluster"]
# cluster_exclude = []
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
```
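
For a quick connectivity check outside of Telegraf, the same HTTP(s) interface
can be queried directly. The following is a minimal sketch (not part of the
plugin) of the kind of request the plugin issues, assuming a local server at
`http://127.0.0.1:8123` and the `default` user with an empty password:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// The query is passed via the "query" URL parameter; "FORMAT JSON" is
	// appended so the response body comes back as machine-readable JSON.
	u, err := url.Parse("http://127.0.0.1:8123") // assumed local server
	if err != nil {
		panic(err)
	}
	q := u.Query()
	q.Set("query", "SELECT metric, toUInt64(value) AS value FROM system.metrics FORMAT JSON")
	u.RawQuery = q.Encode()

	req, err := http.NewRequest("GET", u.String(), nil)
	if err != nil {
		panic(err)
	}
	// Credentials are sent via the X-ClickHouse-User / X-ClickHouse-Key
	// headers, matching the username/password settings above.
	req.Header.Add("X-ClickHouse-User", "default")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}
```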
## Metrics
- clickhouse_events (see [system.events][] for details)
- tags:
- source (ClickHouse server hostname)
- cluster (Name of the cluster [optional])
- shard_num (Shard number in the cluster [optional])
- fields:
- all rows from [system.events][]
- clickhouse_metrics (see [system.metrics][] for details)
- tags:
- source (ClickHouse server hostname)
- cluster (Name of the cluster [optional])
- shard_num (Shard number in the cluster [optional])
- fields:
- all rows from [system.metrics][]
- clickhouse_asynchronous_metrics (see [system.asynchronous_metrics][]
for details)
- tags:
- source (ClickHouse server hostname)
- cluster (Name of the cluster [optional])
- shard_num (Shard number in the cluster [optional])
- fields:
- all rows from [system.asynchronous_metrics][]
- clickhouse_tables
- tags:
- source (ClickHouse server hostname)
- table
- database
- cluster (Name of the cluster [optional])
- shard_num (Shard number in the cluster [optional])
- fields:
- bytes
- parts
- rows
- clickhouse_zookeeper (see [system.zookeeper][] for details)
- tags:
- source (ClickHouse server hostname)
- cluster (Name of the cluster [optional])
- shard_num (Shard number in the cluster [optional])
- fields:
- root_nodes (count of nodes where path=/)
- clickhouse_replication_queue (see [system.replication_queue][] for details)
- tags:
- source (ClickHouse server hostname)
- cluster (Name of the cluster [optional])
- shard_num (Shard number in the cluster [optional])
- fields:
- too_many_tries_replicas (count of replicas with `num_tries > 100`)
- num_tries_replicas (count of replicas with `num_tries > 1`)
- clickhouse_detached_parts (see [system.detached_parts][] for details)
- tags:
- source (ClickHouse server hostname)
- cluster (Name of the cluster [optional])
- shard_num (Shard number in the cluster [optional])
- fields:
- detached_parts (total detached parts for all tables and databases
from [system.detached_parts][])
- clickhouse_dictionaries (see [system.dictionaries][] for details)
- tags:
- source (ClickHouse server hostname)
- cluster (Name of the cluster [optional])
- shard_num (Shard number in the cluster [optional])
- dict_origin (XML filename when the dictionary is created from
*_dictionary.xml, or database.table when the dictionary is created via DDL)
- fields:
- is_loaded (1 when the dictionary data is successfully loaded, 0 when
loading failed)
- bytes_allocated (bytes allocated in RAM after the dictionary is loaded)
- clickhouse_mutations (see [system.mutations][] for details)
- tags:
- source (ClickHouse server hostname)
- cluster (Name of the cluster [optional])
- shard_num (Shard number in the cluster [optional])
- fields:
- running - gauge showing the number of currently incomplete mutations
- failed - counter showing the total number of failed mutations since the
first clickhouse-server run
- completed - counter showing the total number of successfully finished
mutations since the first clickhouse-server run
- clickhouse_disks (see [system.disks][] for details)
- tags:
- source (ClickHouse server hostname)
- cluster (Name of the cluster [optional])
- shard_num (Shard number in the cluster [optional])
- name (disk name in storage configuration)
- path (path to disk)
- fields:
- free_space_percent - 0-100, gauge showing the current percentage of
free disk space relative to total disk space
- keep_free_space_percent - 0-100, gauge showing the percentage of disk
space that must be kept free, relative to total disk space
- clickhouse_processes (see [system.processes][] for details)
- tags:
- source (ClickHouse server hostname)
- cluster (Name of the cluster [optional])
- shard_num (Shard number in the cluster [optional])
- fields:
- percentile_50 - float gauge showing the 50th percentile (quantile 0.5) of
the `elapsed` field of running processes
- percentile_90 - float gauge showing the 90th percentile (quantile 0.9) of
the `elapsed` field of running processes
- longest_running - float gauge showing the maximum value of the `elapsed`
field of running processes
- clickhouse_text_log (see [system.text_log][] for details)
- tags:
- source (ClickHouse server hostname)
- cluster (Name of the cluster [optional])
- shard_num (Shard number in the cluster [optional])
- level (message level; only messages at level Notice or more severe are
collected)
- fields:
- messages_last_10_min - gauge showing the number of messages collected
during the last 10 minutes
## Example Output
```text
clickhouse_events,cluster=test_cluster_two_shards_localhost,host=kshvakov,source=localhost,shard_num=1 read_compressed_bytes=212i,arena_alloc_chunks=35i,function_execute=85i,merge_tree_data_writer_rows=3i,rw_lock_acquired_read_locks=421i,file_open=46i,io_buffer_alloc_bytes=86451985i,inserted_bytes=196i,regexp_created=3i,real_time_microseconds=116832i,query=23i,network_receive_elapsed_microseconds=268i,merge_tree_data_writer_compressed_bytes=1080i,arena_alloc_bytes=212992i,disk_write_elapsed_microseconds=556i,inserted_rows=3i,compressed_read_buffer_bytes=81i,read_buffer_from_file_descriptor_read_bytes=148i,write_buffer_from_file_descriptor_write=47i,merge_tree_data_writer_blocks=3i,soft_page_faults=896i,hard_page_faults=7i,select_query=21i,merge_tree_data_writer_uncompressed_bytes=196i,merge_tree_data_writer_blocks_already_sorted=3i,user_time_microseconds=40196i,compressed_read_buffer_blocks=5i,write_buffer_from_file_descriptor_write_bytes=3246i,io_buffer_allocs=296i,created_write_buffer_ordinary=12i,disk_read_elapsed_microseconds=59347044i,network_send_elapsed_microseconds=1538i,context_lock=1040i,insert_query=1i,system_time_microseconds=14582i,read_buffer_from_file_descriptor_read=3i 1569421000000000000
clickhouse_asynchronous_metrics,cluster=test_cluster_two_shards_localhost,host=kshvakov,source=localhost,shard_num=1 jemalloc.metadata_thp=0i,replicas_max_relative_delay=0i,jemalloc.mapped=1803177984i,jemalloc.allocated=1724839256i,jemalloc.background_thread.run_interval=0i,jemalloc.background_thread.num_threads=0i,uncompressed_cache_cells=0i,replicas_max_absolute_delay=0i,mark_cache_bytes=0i,compiled_expression_cache_count=0i,replicas_sum_queue_size=0i,number_of_tables=35i,replicas_max_merges_in_queue=0i,replicas_max_inserts_in_queue=0i,replicas_sum_merges_in_queue=0i,replicas_max_queue_size=0i,mark_cache_files=0i,jemalloc.background_thread.num_runs=0i,jemalloc.active=1726210048i,uptime=158i,jemalloc.retained=380481536i,replicas_sum_inserts_in_queue=0i,uncompressed_cache_bytes=0i,number_of_databases=2i,jemalloc.metadata=9207704i,max_part_count_for_partition=1i,jemalloc.resident=1742442496i 1569421000000000000
clickhouse_metrics,cluster=test_cluster_two_shards_localhost,host=kshvakov,source=localhost,shard_num=1 replicated_send=0i,write=0i,ephemeral_node=0i,zoo_keeper_request=0i,distributed_files_to_insert=0i,replicated_fetch=0i,background_schedule_pool_task=0i,interserver_connection=0i,leader_replica=0i,delayed_inserts=0i,global_thread_active=41i,merge=0i,readonly_replica=0i,memory_tracking_in_background_schedule_pool=0i,memory_tracking_for_merges=0i,zoo_keeper_session=0i,context_lock_wait=0i,storage_buffer_bytes=0i,background_pool_task=0i,send_external_tables=0i,zoo_keeper_watch=0i,part_mutation=0i,disk_space_reserved_for_merge=0i,distributed_send=0i,version_integer=19014003i,local_thread=0i,replicated_checks=0i,memory_tracking=0i,memory_tracking_in_background_processing_pool=0i,leader_election=0i,revision=54425i,open_file_for_read=0i,open_file_for_write=0i,storage_buffer_rows=0i,rw_lock_waiting_readers=0i,rw_lock_waiting_writers=0i,rw_lock_active_writers=0i,local_thread_active=0i,query_preempted=0i,tcp_connection=1i,http_connection=1i,read=2i,query_thread=0i,dict_cache_requests=0i,rw_lock_active_readers=1i,global_thread=43i,query=1i 1569421000000000000
clickhouse_tables,cluster=test_cluster_two_shards_localhost,database=system,host=kshvakov,source=localhost,shard_num=1,table=trace_log bytes=754i,parts=1i,rows=1i 1569421000000000000
clickhouse_tables,cluster=test_cluster_two_shards_localhost,database=default,host=kshvakov,source=localhost,shard_num=1,table=example bytes=326i,parts=2i,rows=2i 1569421000000000000
```
[system.asynchronous_metrics]: https://clickhouse.tech/docs/en/operations/system-tables/asynchronous_metrics/
[system.detached_parts]: https://clickhouse.tech/docs/en/operations/system-tables/detached_parts/
[system.dictionaries]: https://clickhouse.tech/docs/en/operations/system-tables/dictionaries/
[system.disks]: https://clickhouse.tech/docs/en/operations/system-tables/disks/
[system.events]: https://clickhouse.tech/docs/en/operations/system-tables/events/
[system.metrics]: https://clickhouse.tech/docs/en/operations/system-tables/metrics/
[system.mutations]: https://clickhouse.tech/docs/en/operations/system-tables/mutations/
[system.processes]: https://clickhouse.tech/docs/en/operations/system-tables/processes/
[system.replication_queue]: https://clickhouse.com/docs/en/operations/system-tables/replication_queue/
[system.text_log]: https://clickhouse.tech/docs/en/operations/system-tables/text_log/
[system.zookeeper]: https://clickhouse.tech/docs/en/operations/system-tables/zookeeper/

@@ -0,0 +1,640 @@
//go:generate ../../../tools/readme_config_includer/generator
package clickhouse
import (
"bytes"
_ "embed"
"encoding/json"
"fmt"
"io"
"net"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
//go:embed sample.conf
var sampleConfig string
var defaultTimeout = 5 * time.Second
type ClickHouse struct {
Username string `toml:"username"`
Password string `toml:"password"`
Servers []string `toml:"servers"`
AutoDiscovery bool `toml:"auto_discovery"`
ClusterInclude []string `toml:"cluster_include"`
ClusterExclude []string `toml:"cluster_exclude"`
Timeout config.Duration `toml:"timeout"`
Variant string `toml:"variant"`
HTTPClient http.Client
tls.ClientConfig
}
type connect struct {
Cluster string `json:"cluster"`
ShardNum int `json:"shard_num"`
Hostname string `json:"host_name"`
url *url.URL
}
func (*ClickHouse) SampleConfig() string {
return sampleConfig
}
func (ch *ClickHouse) Init() error {
switch ch.Variant {
case "":
ch.Variant = "self-hosted"
case "self-hosted", "managed":
// valid options
default:
return fmt.Errorf("unknown variant %q", ch.Variant)
}
return nil
}
// Start sets up the HTTP client used to query the ClickHouse servers
func (ch *ClickHouse) Start(telegraf.Accumulator) error {
timeout := defaultTimeout
if time.Duration(ch.Timeout) != 0 {
timeout = time.Duration(ch.Timeout)
}
tlsCfg, err := ch.ClientConfig.TLSConfig()
if err != nil {
return err
}
ch.HTTPClient = http.Client{
Timeout: timeout,
Transport: &http.Transport{
TLSClientConfig: tlsCfg,
Proxy: http.ProxyFromEnvironment,
MaxIdleConnsPerHost: 1,
},
}
return nil
}
// Gather collects metrics from all configured ClickHouse servers
func (ch *ClickHouse) Gather(acc telegraf.Accumulator) (err error) {
var (
connects []connect
exists = func(host string) bool {
for _, c := range connects {
if c.Hostname == host {
return true
}
}
return false
}
)
for _, server := range ch.Servers {
u, err := url.Parse(server)
if err != nil {
return err
}
switch {
case ch.AutoDiscovery:
var conns []connect
if err := ch.execQuery(u, "SELECT cluster, shard_num, host_name FROM system.clusters "+ch.clusterIncludeExcludeFilter(), &conns); err != nil {
acc.AddError(err)
continue
}
for _, c := range conns {
if !exists(c.Hostname) {
c.url = &url.URL{
Scheme: u.Scheme,
Host: net.JoinHostPort(c.Hostname, u.Port()),
}
connects = append(connects, c)
}
}
default:
connects = append(connects, connect{
Hostname: u.Hostname(),
url: u,
})
}
}
for i := range connects {
metricsFuncs := []func(acc telegraf.Accumulator, conn *connect) error{
ch.tables,
ch.replicationQueue,
ch.detachedParts,
ch.dictionaries,
ch.mutations,
ch.disks,
ch.processes,
ch.textLog,
}
// Managed instances on ClickHouse Cloud do not give a user
// permissions to query the zookeeper table
if ch.Variant != "managed" {
metricsFuncs = append(metricsFuncs, ch.zookeeper)
}
for _, metricFunc := range metricsFuncs {
if err := metricFunc(acc, &connects[i]); err != nil {
acc.AddError(err)
}
}
for metric := range commonMetrics {
if err := ch.commonMetrics(acc, &connects[i], metric); err != nil {
acc.AddError(err)
}
}
}
return nil
}
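// Stop closes any idle connections held by the HTTP client.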
func (ch *ClickHouse) Stop() {
ch.HTTPClient.CloseIdleConnections()
}
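// clusterIncludeExcludeFilter builds the WHERE clause appended to the
// system.clusters query during auto-discovery, based on the cluster_include
// and cluster_exclude settings.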
func (ch *ClickHouse) clusterIncludeExcludeFilter() string {
if len(ch.ClusterInclude) == 0 && len(ch.ClusterExclude) == 0 {
return ""
}
var (
escape = func(in string) string {
return "'" + strings.NewReplacer(`\`, `\\`, `'`, `\'`).Replace(in) + "'"
}
makeFilter = func(expr string, args []string) string {
in := make([]string, 0, len(args))
for _, v := range args {
in = append(in, escape(v))
}
return fmt.Sprintf("cluster %s (%s)", expr, strings.Join(in, ", "))
}
includeFilter, excludeFilter string
)
if len(ch.ClusterInclude) != 0 {
includeFilter = makeFilter("IN", ch.ClusterInclude)
}
if len(ch.ClusterExclude) != 0 {
excludeFilter = makeFilter("NOT IN", ch.ClusterExclude)
}
if includeFilter != "" && excludeFilter != "" {
return "WHERE " + includeFilter + " OR " + excludeFilter
}
if includeFilter == "" && excludeFilter != "" {
return "WHERE " + excludeFilter
}
return "WHERE " + includeFilter
}
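// commonMetrics gathers one of the "events", "metrics" or
// "asynchronous_metrics" system tables and emits it as a single measurement.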
func (ch *ClickHouse) commonMetrics(acc telegraf.Accumulator, conn *connect, metric string) error {
var intResult []struct {
Metric string `json:"metric"`
Value chUInt64 `json:"value"`
}
var floatResult []struct {
Metric string `json:"metric"`
Value float64 `json:"value"`
}
tags := makeDefaultTags(conn)
fields := make(map[string]interface{})
if commonMetricsIsFloat[metric] {
if err := ch.execQuery(conn.url, commonMetrics[metric], &floatResult); err != nil {
return err
}
for _, r := range floatResult {
fields[internal.SnakeCase(r.Metric)] = r.Value
}
} else {
if err := ch.execQuery(conn.url, commonMetrics[metric], &intResult); err != nil {
return err
}
for _, r := range intResult {
fields[internal.SnakeCase(r.Metric)] = uint64(r.Value)
}
}
acc.AddFields("clickhouse_"+metric, fields, tags)
return nil
}
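// zookeeper emits the number of ZooKeeper root nodes when the
// system.zookeeper table is available on the server.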
func (ch *ClickHouse) zookeeper(acc telegraf.Accumulator, conn *connect) error {
var zkExists []struct {
ZkExists chUInt64 `json:"zk_exists"`
}
if err := ch.execQuery(conn.url, systemZookeeperExistsSQL, &zkExists); err != nil {
return err
}
tags := makeDefaultTags(conn)
if len(zkExists) > 0 && zkExists[0].ZkExists > 0 {
var zkRootNodes []struct {
ZkRootNodes chUInt64 `json:"zk_root_nodes"`
}
if err := ch.execQuery(conn.url, systemZookeeperRootNodesSQL, &zkRootNodes); err != nil {
return err
}
acc.AddFields("clickhouse_zookeeper",
map[string]interface{}{
"root_nodes": uint64(zkRootNodes[0].ZkRootNodes),
},
tags,
)
}
return nil
}
func (ch *ClickHouse) replicationQueue(acc telegraf.Accumulator, conn *connect) error {
var replicationQueueExists []struct {
ReplicationQueueExists chUInt64 `json:"replication_queue_exists"`
}
if err := ch.execQuery(conn.url, systemReplicationExistsSQL, &replicationQueueExists); err != nil {
return err
}
tags := makeDefaultTags(conn)
if len(replicationQueueExists) > 0 && replicationQueueExists[0].ReplicationQueueExists > 0 {
var replicationTooManyTries []struct {
NumTriesReplicas chUInt64 `json:"replication_num_tries_replicas"`
TooManyTriesReplicas chUInt64 `json:"replication_too_many_tries_replicas"`
}
if err := ch.execQuery(conn.url, systemReplicationNumTriesSQL, &replicationTooManyTries); err != nil {
return err
}
acc.AddFields("clickhouse_replication_queue",
map[string]interface{}{
"too_many_tries_replicas": uint64(replicationTooManyTries[0].TooManyTriesReplicas),
"num_tries_replicas": uint64(replicationTooManyTries[0].NumTriesReplicas),
},
tags,
)
}
return nil
}
func (ch *ClickHouse) detachedParts(acc telegraf.Accumulator, conn *connect) error {
var detachedParts []struct {
DetachedParts chUInt64 `json:"detached_parts"`
}
if err := ch.execQuery(conn.url, systemDetachedPartsSQL, &detachedParts); err != nil {
return err
}
if len(detachedParts) > 0 {
tags := makeDefaultTags(conn)
acc.AddFields("clickhouse_detached_parts",
map[string]interface{}{
"detached_parts": uint64(detachedParts[0].DetachedParts),
},
tags,
)
}
return nil
}
func (ch *ClickHouse) dictionaries(acc telegraf.Accumulator, conn *connect) error {
var brokenDictionaries []struct {
Origin string `json:"origin"`
BytesAllocated chUInt64 `json:"bytes_allocated"`
Status string `json:"status"`
}
if err := ch.execQuery(conn.url, systemDictionariesSQL, &brokenDictionaries); err != nil {
return err
}
for _, dict := range brokenDictionaries {
tags := makeDefaultTags(conn)
isLoaded := uint64(1)
if dict.Status != "LOADED" {
isLoaded = 0
}
if dict.Origin != "" {
tags["dict_origin"] = dict.Origin
acc.AddFields("clickhouse_dictionaries",
map[string]interface{}{
"is_loaded": isLoaded,
"bytes_allocated": uint64(dict.BytesAllocated),
},
tags,
)
}
}
return nil
}
func (ch *ClickHouse) mutations(acc telegraf.Accumulator, conn *connect) error {
var mutationsStatus []struct {
Failed chUInt64 `json:"failed"`
Running chUInt64 `json:"running"`
Completed chUInt64 `json:"completed"`
}
if err := ch.execQuery(conn.url, systemMutationSQL, &mutationsStatus); err != nil {
return err
}
if len(mutationsStatus) > 0 {
tags := makeDefaultTags(conn)
acc.AddFields("clickhouse_mutations",
map[string]interface{}{
"failed": uint64(mutationsStatus[0].Failed),
"running": uint64(mutationsStatus[0].Running),
"completed": uint64(mutationsStatus[0].Completed),
},
tags,
)
}
return nil
}
func (ch *ClickHouse) disks(acc telegraf.Accumulator, conn *connect) error {
var disksStatus []struct {
Name string `json:"name"`
Path string `json:"path"`
FreePercent chUInt64 `json:"free_space_percent"`
KeepFreePercent chUInt64 `json:"keep_free_space_percent"`
}
if err := ch.execQuery(conn.url, systemDisksSQL, &disksStatus); err != nil {
return err
}
for _, disk := range disksStatus {
tags := makeDefaultTags(conn)
tags["name"] = disk.Name
tags["path"] = disk.Path
acc.AddFields("clickhouse_disks",
map[string]interface{}{
"free_space_percent": uint64(disk.FreePercent),
"keep_free_space_percent": uint64(disk.KeepFreePercent),
},
tags,
)
}
return nil
}
func (ch *ClickHouse) processes(acc telegraf.Accumulator, conn *connect) error {
var processesStats []struct {
QueryType string `json:"query_type"`
Percentile50 float64 `json:"p50"`
Percentile90 float64 `json:"p90"`
LongestRunning float64 `json:"longest_running"`
}
if err := ch.execQuery(conn.url, systemProcessesSQL, &processesStats); err != nil {
return err
}
for _, process := range processesStats {
tags := makeDefaultTags(conn)
tags["query_type"] = process.QueryType
acc.AddFields("clickhouse_processes",
map[string]interface{}{
"percentile_50": process.Percentile50,
"percentile_90": process.Percentile90,
"longest_running": process.LongestRunning,
},
tags,
)
}
return nil
}
func (ch *ClickHouse) textLog(acc telegraf.Accumulator, conn *connect) error {
var textLogExists []struct {
TextLogExists chUInt64 `json:"text_log_exists"`
}
if err := ch.execQuery(conn.url, systemTextLogExistsSQL, &textLogExists); err != nil {
return err
}
if len(textLogExists) > 0 && textLogExists[0].TextLogExists > 0 {
var textLogLast10MinMessages []struct {
Level string `json:"level"`
MessagesLast10Min chUInt64 `json:"messages_last_10_min"`
}
if err := ch.execQuery(conn.url, systemTextLogSQL, &textLogLast10MinMessages); err != nil {
return err
}
for _, textLogItem := range textLogLast10MinMessages {
tags := makeDefaultTags(conn)
tags["level"] = textLogItem.Level
acc.AddFields("clickhouse_text_log",
map[string]interface{}{
"messages_last_10_min": uint64(textLogItem.MessagesLast10Min),
},
tags,
)
}
}
return nil
}
func (ch *ClickHouse) tables(acc telegraf.Accumulator, conn *connect) error {
var parts []struct {
Database string `json:"database"`
Table string `json:"table"`
Bytes chUInt64 `json:"bytes"`
Parts chUInt64 `json:"parts"`
Rows chUInt64 `json:"rows"`
}
if err := ch.execQuery(conn.url, systemPartsSQL, &parts); err != nil {
return err
}
tags := makeDefaultTags(conn)
for _, part := range parts {
tags["table"] = part.Table
tags["database"] = part.Database
acc.AddFields("clickhouse_tables",
map[string]interface{}{
"bytes": uint64(part.Bytes),
"parts": uint64(part.Parts),
"rows": uint64(part.Rows),
},
tags,
)
}
return nil
}
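// makeDefaultTags returns the tag set shared by all measurements: the source
// hostname plus, when known, the cluster name and shard number.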
func makeDefaultTags(conn *connect) map[string]string {
tags := map[string]string{
"source": conn.Hostname,
}
if len(conn.Cluster) != 0 {
tags["cluster"] = conn.Cluster
}
if conn.ShardNum != 0 {
tags["shard_num"] = strconv.Itoa(conn.ShardNum)
}
return tags
}
type clickhouseError struct {
StatusCode int
body []byte
}
func (e *clickhouseError) Error() string {
return fmt.Sprintf("received error code %d: %s", e.StatusCode, e.body)
}
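// execQuery runs the given query against the ClickHouse HTTP endpoint with
// "FORMAT JSON" appended, and decodes the "data" array of the JSON response
// into i.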
func (ch *ClickHouse) execQuery(address *url.URL, query string, i interface{}) error {
q := address.Query()
q.Set("query", query+" FORMAT JSON")
address.RawQuery = q.Encode()
req, err := http.NewRequest("GET", address.String(), nil)
if err != nil {
return err
}
if ch.Username != "" {
req.Header.Add("X-ClickHouse-User", ch.Username)
}
if ch.Password != "" {
req.Header.Add("X-ClickHouse-Key", ch.Password)
}
resp, err := ch.HTTPClient.Do(req)
if err != nil {
return err
}
defer func() { _ = resp.Body.Close() }()
if resp.StatusCode >= 300 {
//nolint:errcheck // reading body for error reporting
body, _ := io.ReadAll(io.LimitReader(resp.Body, 200))
return &clickhouseError{
StatusCode: resp.StatusCode,
body: body,
}
}
var response struct {
Data json.RawMessage
}
if err := json.NewDecoder(resp.Body).Decode(&response); err != nil {
return err
}
if err := json.Unmarshal(response.Data, i); err != nil {
return err
}
if _, err := io.Copy(io.Discard, resp.Body); err != nil {
return err
}
return nil
}
// see https://clickhouse.yandex/docs/en/operations/settings/settings/#session_settings-output_format_json_quote_64bit_integers
type chUInt64 uint64
// UnmarshalJSON is a custom unmarshaler for chUInt64 to handle ClickHouse's JSON format.
func (i *chUInt64) UnmarshalJSON(b []byte) error {
b = bytes.TrimPrefix(b, []byte(`"`))
b = bytes.TrimSuffix(b, []byte(`"`))
v, err := strconv.ParseUint(string(b), 10, 64)
if err != nil {
return err
}
*i = chUInt64(v)
return nil
}
const (
systemEventsSQL = "SELECT event AS metric, toUInt64(value) AS value FROM system.events"
systemMetricsSQL = "SELECT metric, toUInt64(value) AS value FROM system.metrics"
systemAsyncMetricsSQL = "SELECT metric, toFloat64(value) AS value FROM system.asynchronous_metrics"
systemPartsSQL = `
SELECT
database,
table,
SUM(bytes) AS bytes,
COUNT(*) AS parts,
SUM(rows) AS rows
FROM system.parts
WHERE active = 1
GROUP BY
database, table
ORDER BY
database, table
`
systemZookeeperExistsSQL = "SELECT count() AS zk_exists FROM system.tables WHERE database='system' AND name='zookeeper'"
systemZookeeperRootNodesSQL = "SELECT count() AS zk_root_nodes FROM system.zookeeper WHERE path='/'"
systemReplicationExistsSQL = "SELECT count() AS replication_queue_exists FROM system.tables WHERE database='system' AND name='replication_queue'"
systemReplicationNumTriesSQL = "SELECT countIf(num_tries>1) AS replication_num_tries_replicas, countIf(num_tries>100) " +
"AS replication_too_many_tries_replicas FROM system.replication_queue SETTINGS empty_result_for_aggregation_by_empty_set=0"
systemDetachedPartsSQL = "SELECT count() AS detached_parts FROM system.detached_parts SETTINGS empty_result_for_aggregation_by_empty_set=0"
systemDictionariesSQL = "SELECT origin, status, bytes_allocated FROM system.dictionaries"
systemMutationSQL = "SELECT countIf(latest_fail_time>toDateTime('0000-00-00 00:00:00') AND is_done=0) " +
"AS failed, countIf(latest_fail_time=toDateTime('0000-00-00 00:00:00') AND is_done=0) " +
"AS running, countIf(is_done=1) AS completed FROM system.mutations SETTINGS empty_result_for_aggregation_by_empty_set=0"
systemDisksSQL = "SELECT name, path, toUInt64(100*free_space / total_space) " +
"AS free_space_percent, toUInt64( 100 * keep_free_space / total_space) AS keep_free_space_percent FROM system.disks"
systemProcessesSQL = "SELECT multiIf(positionCaseInsensitive(query,'select')=1,'select',positionCaseInsensitive(query,'insert')=1,'insert','other') " +
"AS query_type, quantile\n(0.5)(elapsed) AS p50, quantile(0.9)(elapsed) AS p90, max(elapsed) AS longest_running " +
"FROM system.processes GROUP BY query_type SETTINGS empty_result_for_aggregation_by_empty_set=0"
systemTextLogExistsSQL = "SELECT count() AS text_log_exists FROM system.tables WHERE database='system' AND name='text_log'"
systemTextLogSQL = "SELECT count() AS messages_last_10_min, level FROM system.text_log " +
"WHERE level <= 'Notice' AND event_time >= now() - INTERVAL 600 SECOND GROUP BY level SETTINGS empty_result_for_aggregation_by_empty_set=0"
)
var commonMetrics = map[string]string{
"events": systemEventsSQL,
"metrics": systemMetricsSQL,
"asynchronous_metrics": systemAsyncMetricsSQL,
}
var commonMetricsIsFloat = map[string]bool{
"events": false,
"metrics": false,
"asynchronous_metrics": true,
}
var _ telegraf.ServiceInput = &ClickHouse{}
func init() {
inputs.Add("clickhouse", func() telegraf.Input {
return &ClickHouse{
AutoDiscovery: true,
ClientConfig: tls.ClientConfig{
InsecureSkipVerify: false,
},
Timeout: config.Duration(defaultTimeout),
}
})
}

@@ -0,0 +1,736 @@
package clickhouse
import (
"encoding/json"
"net/http"
"net/http/httptest"
"strings"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf/testutil"
)
func TestClusterIncludeExcludeFilter(t *testing.T) {
ch := ClickHouse{}
require.Empty(t, ch.clusterIncludeExcludeFilter())
ch.ClusterExclude = []string{"test_cluster"}
require.Equal(t, "WHERE cluster NOT IN ('test_cluster')", ch.clusterIncludeExcludeFilter())
ch.ClusterExclude = []string{"test_cluster"}
ch.ClusterInclude = []string{"cluster"}
require.Equal(t, "WHERE cluster IN ('cluster') OR cluster NOT IN ('test_cluster')", ch.clusterIncludeExcludeFilter())
ch.ClusterExclude = make([]string, 0)
ch.ClusterInclude = []string{"cluster1", "cluster2"}
require.Equal(t, "WHERE cluster IN ('cluster1', 'cluster2')", ch.clusterIncludeExcludeFilter())
ch.ClusterExclude = []string{"cluster1", "cluster2"}
ch.ClusterInclude = make([]string, 0)
require.Equal(t, "WHERE cluster NOT IN ('cluster1', 'cluster2')", ch.clusterIncludeExcludeFilter())
}
func TestChInt64(t *testing.T) {
assets := map[string]uint64{
`"1"`: 1,
"1": 1,
"42": 42,
`"42"`: 42,
"18446743937525109187": 18446743937525109187,
}
for src, expected := range assets {
var v chUInt64
err := v.UnmarshalJSON([]byte(src))
require.NoError(t, err)
require.Equal(t, expected, uint64(v))
}
}
func TestGather(t *testing.T) {
var (
ts = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
type result struct {
Data interface{} `json:"data"`
}
enc := json.NewEncoder(w)
switch query := r.URL.Query().Get("query"); {
case strings.Contains(query, "system.parts"):
err := enc.Encode(result{
Data: []struct {
Database string `json:"database"`
Table string `json:"table"`
Bytes chUInt64 `json:"bytes"`
Parts chUInt64 `json:"parts"`
Rows chUInt64 `json:"rows"`
}{
{
Database: "test_database",
Table: "test_table",
Bytes: 1,
Parts: 10,
Rows: 100,
},
},
})
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
t.Error(err)
return
}
case strings.Contains(query, "system.events"):
err := enc.Encode(result{
Data: []struct {
Metric string `json:"metric"`
Value chUInt64 `json:"value"`
}{
{
Metric: "TestSystemEvent",
Value: 1000,
},
{
Metric: "TestSystemEvent2",
Value: 2000,
},
},
})
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
t.Error(err)
return
}
case strings.Contains(query, "system.metrics"):
err := enc.Encode(result{
Data: []struct {
Metric string `json:"metric"`
Value chUInt64 `json:"value"`
}{
{
Metric: "TestSystemMetric",
Value: 1000,
},
{
Metric: "TestSystemMetric2",
Value: 2000,
},
},
})
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
t.Error(err)
return
}
case strings.Contains(query, "system.asynchronous_metrics"):
err := enc.Encode(result{
Data: []struct {
Metric string `json:"metric"`
Value chUInt64 `json:"value"`
}{
{
Metric: "TestSystemAsynchronousMetric",
Value: 1000,
},
{
Metric: "TestSystemAsynchronousMetric2",
Value: 2000,
},
},
})
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
t.Error(err)
return
}
case strings.Contains(query, "zk_exists"):
err := enc.Encode(result{
Data: []struct {
ZkExists chUInt64 `json:"zk_exists"`
}{
{
ZkExists: 1,
},
},
})
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
t.Error(err)
return
}
case strings.Contains(query, "zk_root_nodes"):
err := enc.Encode(result{
Data: []struct {
ZkRootNodes chUInt64 `json:"zk_root_nodes"`
}{
{
ZkRootNodes: 2,
},
},
})
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
t.Error(err)
return
}
case strings.Contains(query, "replication_queue_exists"):
err := enc.Encode(result{
Data: []struct {
ReplicationQueueExists chUInt64 `json:"replication_queue_exists"`
}{
{
ReplicationQueueExists: 1,
},
},
})
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
t.Error(err)
return
}
case strings.Contains(query, "replication_too_many_tries_replicas"):
err := enc.Encode(result{
Data: []struct {
TooManyTriesReplicas chUInt64 `json:"replication_too_many_tries_replicas"`
NumTriesReplicas chUInt64 `json:"replication_num_tries_replicas"`
}{
{
TooManyTriesReplicas: 10,
NumTriesReplicas: 100,
},
},
})
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
t.Error(err)
return
}
case strings.Contains(query, "system.detached_parts"):
err := enc.Encode(result{
Data: []struct {
DetachedParts chUInt64 `json:"detached_parts"`
}{
{
DetachedParts: 10,
},
},
})
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
t.Error(err)
return
}
case strings.Contains(query, "system.dictionaries"):
err := enc.Encode(result{
Data: []struct {
Origin string `json:"origin"`
Status string `json:"status"`
BytesAllocated chUInt64 `json:"bytes_allocated"`
}{
{
Origin: "default.test_dict",
Status: "NOT_LOADED",
BytesAllocated: 100,
},
},
})
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
t.Error(err)
return
}
case strings.Contains(query, "system.mutations"):
err := enc.Encode(result{
Data: []struct {
Failed chUInt64 `json:"failed"`
Completed chUInt64 `json:"completed"`
Running chUInt64 `json:"running"`
}{
{
Failed: 10,
Running: 1,
Completed: 100,
},
},
})
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
t.Error(err)
return
}
case strings.Contains(query, "system.disks"):
err := enc.Encode(result{
Data: []struct {
Name string `json:"name"`
Path string `json:"path"`
FreePercent chUInt64 `json:"free_space_percent"`
KeepFreePercent chUInt64 `json:"keep_free_space_percent"`
}{
{
Name: "default",
Path: "/var/lib/clickhouse",
FreePercent: 1,
KeepFreePercent: 10,
},
},
})
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
t.Error(err)
return
}
case strings.Contains(query, "system.processes"):
err := enc.Encode(result{
Data: []struct {
QueryType string `json:"query_type"`
Percentile50 float64 `json:"p50"`
Percentile90 float64 `json:"p90"`
LongestRunning float64 `json:"longest_running"`
}{
{
QueryType: "select",
Percentile50: 0.1,
Percentile90: 0.5,
LongestRunning: 10,
},
{
QueryType: "insert",
Percentile50: 0.2,
Percentile90: 1.5,
LongestRunning: 100,
},
{
QueryType: "other",
Percentile50: 0.4,
Percentile90: 4.5,
LongestRunning: 1000,
},
},
})
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
t.Error(err)
return
}
case strings.Contains(query, "text_log_exists"):
err := enc.Encode(result{
Data: []struct {
TextLogExists chUInt64 `json:"text_log_exists"`
}{
{
TextLogExists: 1,
},
},
})
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
t.Error(err)
return
}
case strings.Contains(query, "system.text_log"):
err := enc.Encode(result{
Data: []struct {
Level string `json:"level"`
LastMessagesLast10Min chUInt64 `json:"messages_last_10_min"`
}{
{
Level: "Fatal",
LastMessagesLast10Min: 0,
},
{
Level: "Critical",
LastMessagesLast10Min: 10,
},
{
Level: "Error",
LastMessagesLast10Min: 20,
},
{
Level: "Warning",
LastMessagesLast10Min: 30,
},
{
Level: "Notice",
LastMessagesLast10Min: 40,
},
},
})
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
t.Error(err)
return
}
}
}))
ch = &ClickHouse{
Servers: []string{
ts.URL,
},
}
acc = &testutil.Accumulator{}
)
defer ts.Close()
require.NoError(t, ch.Gather(acc))
acc.AssertContainsTaggedFields(t, "clickhouse_tables",
map[string]interface{}{
"bytes": uint64(1),
"parts": uint64(10),
"rows": uint64(100),
},
map[string]string{
"source": "127.0.0.1",
"table": "test_table",
"database": "test_database",
},
)
acc.AssertContainsFields(t, "clickhouse_events",
map[string]interface{}{
"test_system_event": uint64(1000),
"test_system_event2": uint64(2000),
},
)
acc.AssertContainsFields(t, "clickhouse_metrics",
map[string]interface{}{
"test_system_metric": uint64(1000),
"test_system_metric2": uint64(2000),
},
)
acc.AssertContainsFields(t, "clickhouse_asynchronous_metrics",
map[string]interface{}{
"test_system_asynchronous_metric": float64(1000),
"test_system_asynchronous_metric2": float64(2000),
},
)
acc.AssertContainsFields(t, "clickhouse_zookeeper",
map[string]interface{}{
"root_nodes": uint64(2),
},
)
acc.AssertContainsFields(t, "clickhouse_replication_queue",
map[string]interface{}{
"too_many_tries_replicas": uint64(10),
"num_tries_replicas": uint64(100),
},
)
acc.AssertContainsFields(t, "clickhouse_detached_parts",
map[string]interface{}{
"detached_parts": uint64(10),
},
)
acc.AssertContainsTaggedFields(t, "clickhouse_dictionaries",
map[string]interface{}{
"is_loaded": uint64(0),
"bytes_allocated": uint64(100),
},
map[string]string{
"source": "127.0.0.1",
"dict_origin": "default.test_dict",
},
)
acc.AssertContainsFields(t, "clickhouse_mutations",
map[string]interface{}{
"running": uint64(1),
"failed": uint64(10),
"completed": uint64(100),
},
)
acc.AssertContainsTaggedFields(t, "clickhouse_disks",
map[string]interface{}{
"free_space_percent": uint64(1),
"keep_free_space_percent": uint64(10),
},
map[string]string{
"source": "127.0.0.1",
"name": "default",
"path": "/var/lib/clickhouse",
},
)
acc.AssertContainsTaggedFields(t, "clickhouse_processes",
map[string]interface{}{
"percentile_50": 0.1,
"percentile_90": 0.5,
"longest_running": float64(10),
},
map[string]string{
"source": "127.0.0.1",
"query_type": "select",
},
)
acc.AssertContainsTaggedFields(t, "clickhouse_processes",
map[string]interface{}{
"percentile_50": 0.2,
"percentile_90": 1.5,
"longest_running": float64(100),
},
map[string]string{
"source": "127.0.0.1",
"query_type": "insert",
},
)
acc.AssertContainsTaggedFields(t, "clickhouse_processes",
map[string]interface{}{
"percentile_50": 0.4,
"percentile_90": 4.5,
"longest_running": float64(1000),
},
map[string]string{
"source": "127.0.0.1",
"query_type": "other",
},
)
for i, level := range []string{"Fatal", "Critical", "Error", "Warning", "Notice"} {
acc.AssertContainsTaggedFields(t, "clickhouse_text_log",
map[string]interface{}{
"messages_last_10_min": uint64(i * 10),
},
map[string]string{
"source": "127.0.0.1",
"level": level,
},
)
}
}
func TestGatherWithSomeTablesNotExists(t *testing.T) {
var (
ts = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
type result struct {
Data interface{} `json:"data"`
}
enc := json.NewEncoder(w)
switch query := r.URL.Query().Get("query"); {
case strings.Contains(query, "zk_exists"):
err := enc.Encode(result{
Data: []struct {
ZkExists chUInt64 `json:"zk_exists"`
}{
{
ZkExists: 0,
},
},
})
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
t.Error(err)
return
}
case strings.Contains(query, "replication_queue_exists"):
err := enc.Encode(result{
Data: []struct {
ReplicationQueueExists chUInt64 `json:"replication_queue_exists"`
}{
{
ReplicationQueueExists: 0,
},
},
})
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
t.Error(err)
return
}
case strings.Contains(query, "text_log_exists"):
err := enc.Encode(result{
Data: []struct {
TextLogExists chUInt64 `json:"text_log_exists"`
}{
{
TextLogExists: 0,
},
},
})
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
t.Error(err)
return
}
}
}))
ch = &ClickHouse{
Servers: []string{
ts.URL,
},
Username: "default",
}
acc = &testutil.Accumulator{}
)
defer ts.Close()
require.NoError(t, ch.Gather(acc))
acc.AssertDoesNotContainMeasurement(t, "clickhouse_zookeeper")
acc.AssertDoesNotContainMeasurement(t, "clickhouse_replication_queue")
acc.AssertDoesNotContainMeasurement(t, "clickhouse_text_log")
}
func TestGatherClickhouseCloud(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
type result struct {
Data interface{} `json:"data"`
}
enc := json.NewEncoder(w)
switch query := r.URL.Query().Get("query"); {
case strings.Contains(query, "zk_exists"):
err := enc.Encode(result{
Data: []struct {
ZkExists chUInt64 `json:"zk_exists"`
}{
{
ZkExists: 1,
},
},
})
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
t.Error(err)
return
}
case strings.Contains(query, "zk_root_nodes"):
err := enc.Encode(result{
Data: []struct {
ZkRootNodes chUInt64 `json:"zk_root_nodes"`
}{
{
ZkRootNodes: 2,
},
},
})
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
t.Error(err)
return
}
}
}))
defer ts.Close()
ch := &ClickHouse{
Servers: []string{ts.URL},
Variant: "managed",
}
acc := &testutil.Accumulator{}
require.NoError(t, ch.Gather(acc))
acc.AssertDoesNotContainMeasurement(t, "clickhouse_zookeeper")
}
func TestWrongJSONMarshalling(t *testing.T) {
var (
ts = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
type result struct {
Data interface{} `json:"data"`
}
enc := json.NewEncoder(w)
// the data section is an empty list, so no metrics should be produced
err := enc.Encode(result{
Data: make([]struct{}, 0),
})
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
t.Error(err)
return
}
}))
ch = &ClickHouse{
Servers: []string{
ts.URL,
},
Username: "default",
}
acc = &testutil.Accumulator{}
)
defer ts.Close()
require.NoError(t, ch.Gather(acc))
require.Empty(t, acc.Metrics)
allMeasurements := []string{
"clickhouse_events",
"clickhouse_metrics",
"clickhouse_asynchronous_metrics",
"clickhouse_tables",
"clickhouse_zookeeper",
"clickhouse_replication_queue",
"clickhouse_detached_parts",
"clickhouse_dictionaries",
"clickhouse_mutations",
"clickhouse_disks",
"clickhouse_processes",
"clickhouse_text_log",
}
require.GreaterOrEqual(t, len(allMeasurements), len(acc.Errors))
}
func TestOfflineServer(t *testing.T) {
var (
acc = &testutil.Accumulator{}
ch = &ClickHouse{
Servers: []string{
"http://wrong-domain.local:8123",
},
Username: "default",
HTTPClient: http.Client{
Timeout: 1 * time.Millisecond,
},
}
)
require.NoError(t, ch.Gather(acc))
require.Empty(t, acc.Metrics)
allMeasurements := []string{
"clickhouse_events",
"clickhouse_metrics",
"clickhouse_asynchronous_metrics",
"clickhouse_tables",
"clickhouse_zookeeper",
"clickhouse_replication_queue",
"clickhouse_detached_parts",
"clickhouse_dictionaries",
"clickhouse_mutations",
"clickhouse_disks",
"clickhouse_processes",
"clickhouse_text_log",
}
require.GreaterOrEqual(t, len(allMeasurements), len(acc.Errors))
}
func TestAutoDiscovery(t *testing.T) {
var (
ts = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
type result struct {
Data interface{} `json:"data"`
}
enc := json.NewEncoder(w)
query := r.URL.Query().Get("query")
if strings.Contains(query, "system.clusters") {
err := enc.Encode(result{
Data: []struct {
Cluster string `json:"cluster"`
Hostname string `json:"host_name"`
ShardNum chUInt64 `json:"shard_num"`
}{
{
Cluster: "test_database",
Hostname: "test_table",
ShardNum: 1,
},
},
})
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
t.Error(err)
return
}
}
}))
ch = &ClickHouse{
Servers: []string{
ts.URL,
},
Username: "default",
AutoDiscovery: true,
}
acc = &testutil.Accumulator{}
)
defer ts.Close()
require.NoError(t, ch.Gather(acc))
}

@@ -0,0 +1,13 @@
-----BEGIN DH PARAMETERS-----
MIICCAKCAgEAoo1x7wI5K57P1/AkHUmVWzKNfy46b/ni/QtClomTB78Ks1FP8dzs
CQBW/pfL8yidxTialNhMRCZO1J+uPjTvd8dG8SFZzVylkF41LBNrUD+MLyh/b6Nr
8uWf3tqYCtsiqsQsnq/oU7C29wn6UjhPPVbRRDPGyJUFOgp0ebPR0L2gOc5HhXSF
Tt0fuWnvgZJBKGvyodby3p2CSheu8K6ZteVc8ZgHuanhCQA30nVN+yNQzyozlB2H
B9jxTDPJy8+/4Mui3iiNyXg6FaiI9lWdH7xgKoZlHi8BWlLz5Se9JVNYg0dPrMTz
K0itQyyTKUlK73x+1uPm6q1AJwz08EZiCXNbk58/Sf+pdwDmAO2QSRrERC73vnvc
B1+4+Kf7RS7oYpAHknKm/MFnkCJLVIq1b6kikYcIgVCYe+Z1UytSmG1QfwdgL8QQ
TVYVHBg4w07+s3/IJ1ekvNhdxpkmmevYt7GjohWu8vKkip4se+reNdo+sqLsgFKf
1IuDMD36zn9FVukvs7e3BwZCTkdosGHvHGjA7zm2DwPPO16hCvJ4mE6ULLpp2NEw
EBYWm3Tv6M/xtrF5Afyh0gAh7eL767/qsarbx6jlqs+dnh3LptqsE3WerWK54+0B
3Hr5CVfgYbeXuW2HeFb+fS6CNUWmiAsq1XRiz5p16hpeMGYN/qyF1IsCAQI=
-----END DH PARAMETERS-----

@@ -0,0 +1,30 @@
version: '3'
services:
clickhouse:
# switch to `:latest` once https://github.com/ClickHouse/ClickHouse/issues/13057 is resolved
image: docker.io/yandex/clickhouse-server:${CLICKHOUSE_VERSION:-latest}
volumes:
- ./init_schema.sql:/docker-entrypoint-initdb.d/init_schema.sql
- ./test_dictionary.xml:/etc/clickhouse-server/01-test_dictionary.xml
- ./zookeeper.xml:/etc/clickhouse-server/config.d/00-zookeeper.xml
- ./tls_settings.xml:/etc/clickhouse-server/config.d/01-tls_settings.xml
# please comment out text_log.xml when CLICKHOUSE_VERSION = 19.16
- ./text_log.xml:/etc/clickhouse-server/config.d/02-text_log.xml
- ./part_log.xml:/etc/clickhouse-server/config.d/03-part_log.xml
- ./mysql_port.xml:/etc/clickhouse-server/config.d/04-mysql_port.xml
- ./dhparam.pem:/etc/clickhouse-server/dhparam.pem
- ../../../../testutil/pki/serverkey.pem:/etc/clickhouse-server/server.key
- ../../../../testutil/pki/servercert.pem:/etc/clickhouse-server/server.crt
ports:
- 3306:3306
- 8123:8123
- 8443:8443
- 9000:9000
- 9009:9009
zookeeper:
image: docker.io/zookeeper:3.5.6
volumes:
- /var/lib/zookeeper
ports:
- 2181:2181

@@ -0,0 +1,6 @@
DROP TABLE IF EXISTS default.test;
CREATE TABLE default.test(
Nom String,
Code Nullable(String) DEFAULT Null,
Cur Nullable(String) DEFAULT Null
) ENGINE=MergeTree() ORDER BY tuple();

@@ -0,0 +1,3 @@
<yandex>
<mysql_port>3306</mysql_port>
</yandex>

@@ -0,0 +1,12 @@
<yandex>
<part_log>
<database>system</database>
<table>part_log</table>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<!-- 19.16 -->
<partition_by>event_date</partition_by>
<!-- 20.5 -->
<!-- <engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + INTERVAL 30 day</engine> -->
</part_log>
</yandex>

@@ -0,0 +1,12 @@
### ClickHouse input plugin
[[inputs.clickhouse]]
timeout = 2
username = "default"
servers = ["http://127.0.0.1:8123"]
auto_discovery = true
cluster_include = []
cluster_exclude = ["test_shard_localhost"]
[[outputs.file]]
files = ["stdout"]

@@ -0,0 +1,16 @@
### ClickHouse input plugin
[[inputs.clickhouse]]
timeout = 2
username = "default"
servers = ["https://127.0.0.1:8443"]
auto_discovery = true
cluster_include = []
cluster_exclude = ["test_shard_localhost"]
insecure_skip_verify = false
tls_cert = "./testutil/pki/clientcert.pem"
tls_key = "./testutil/pki/clientkey.pem"
tls_ca = "./testutil/pki/cacert.pem"
[[outputs.file]]
files = ["stdout"]

@@ -0,0 +1,62 @@
<!--
CREATE DICTIONARY IF NOT EXISTS default.test_dict1(
Nom String,
Code Nullable(String) DEFAULT Null,
Cur Nullable(String) DEFAULT Null
) PRIMARY KEY nom
SOURCE(
MYSQL(port 3306 host '127.0.0.1' user 'default' password '' db 'default' table 'test')
)
LAYOUT(COMPLEX_KEY_HASHED())
LIFETIME(MIN 300 MAX 600);
-->
<yandex>
<dictionary>
<name>default.test_dict</name>
<structure>
<!-- Complex key configuration -->
<key>
<attribute>
<name>Nom</name>
<type>String</type>
<null_value></null_value>
</attribute>
</key>
<!-- Attribute parameters -->
<attribute>
<name>Code</name>
<type>String</type>
<null_value></null_value>
</attribute>
<attribute>
<name>Cur</name>
<type>String</type>
<null_value></null_value>
</attribute>
</structure>
<source>
<!-- Source configuration -->
<mysql>
<port>3306</port>
<user>default</user>
<password/>
<replica>
<host>127.0.0.1</host>
<priority>1</priority>
</replica>
<db>default</db>
<table>test</table>
</mysql>
</source>
<layout>
<!-- Memory layout configuration -->
<complex_key_hashed />
</layout>
<!-- Lifetime of dictionary in memory -->
<lifetime>300</lifetime>
</dictionary>
</yandex>

@@ -0,0 +1,12 @@
<yandex>
<text_log>
<level>notice</level>
<database>system</database>
<table>text_log</table>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<!-- 19.17 -->
<partition_by>event_date</partition_by>
<!-- 20.5 -->
<!-- <engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + INTERVAL 30 day</engine> -->
</text_log>
</yandex>

@@ -0,0 +1,4 @@
<yandex>
<https_port>8443</https_port>
<tcp_port_secure>9440</tcp_port_secure>
</yandex>

@@ -0,0 +1,19 @@
<yandex>
<zookeeper>
<node>
<host>zookeeper</host>
<port>2181</port>
</node>
</zookeeper>
<remote_servers replace="1">
<test>
<shard>
<internal_replication>1</internal_replication>
<replica>
<host>localhost</host>
<port>9000</port>
</replica>
</shard>
</test>
</remote_servers>
</yandex>

@@ -0,0 +1,69 @@
# Read metrics from one or many ClickHouse servers
[[inputs.clickhouse]]
## Username for authorization on ClickHouse server
username = "default"
## Password for authorization on ClickHouse server
# password = ""
## HTTP(s) timeout while getting metrics values
## The timeout includes connection time, any redirects, and reading the
## response body.
# timeout = "5s"
## List of servers for metrics scraping
## Metrics are scraped via the HTTP(s) ClickHouse interface
## https://clickhouse.tech/docs/en/interfaces/http/
servers = ["http://127.0.0.1:8123"]
## Server Variant
## When set to "managed", some queries are excluded from being run. This is
## useful for instances hosted in ClickHouse Cloud where certain tables are
## not available.
# variant = "self-hosted"
## If "auto_discovery"" is "true" plugin tries to connect to all servers
## available in the cluster with using same "user:password" described in
## "user" and "password" parameters and get this server hostname list from
## "system.clusters" table. See
## - https://clickhouse.tech/docs/en/operations/system_tables/#system-clusters
## - https://clickhouse.tech/docs/en/operations/server_settings/settings/#server_settings_remote_servers
## - https://clickhouse.tech/docs/en/operations/table_engines/distributed/
## - https://clickhouse.tech/docs/en/operations/table_engines/replication/#creating-replicated-tables
# auto_discovery = true
## Filter cluster names in "system.clusters" when "auto_discovery" is "true".
## When this filter is present, a "WHERE cluster IN (...)" filter is applied.
## Use only full cluster names here; regexp and glob filters are not
## allowed. For example, for "/etc/clickhouse-server/config.d/remote.xml":
## <yandex>
## <remote_servers>
## <my-own-cluster>
## <shard>
## <replica><host>clickhouse-ru-1.local</host><port>9000</port></replica>
## <replica><host>clickhouse-ru-2.local</host><port>9000</port></replica>
## </shard>
## <shard>
## <replica><host>clickhouse-eu-1.local</host><port>9000</port></replica>
## <replica><host>clickhouse-eu-2.local</host><port>9000</port></replica>
## </shard>
## </my-own-cluster>
## </remote_servers>
##
## </yandex>
##
## example: cluster_include = ["my-own-cluster"]
# cluster_include = []
## Filter cluster names in "system.clusters" when "auto_discovery" is
## "true". When this filter is present, a "WHERE cluster NOT IN (...)"
## filter is applied.
## example: cluster_exclude = ["my-internal-not-discovered-cluster"]
# cluster_exclude = []
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false