1
0
Fork 0

Adding upstream version 1.34.4.

Signed-off-by: Daniel Baumann <daniel@debian.org>
This commit is contained in:
Daniel Baumann 2025-05-24 07:26:29 +02:00
parent e393c3af3f
commit 4978089aab
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
4963 changed files with 677545 additions and 0 deletions

View file

@ -0,0 +1,174 @@
# OpenTelemetry Input Plugin
This plugin receives traces, metrics and logs from
[OpenTelemetry](https://opentelemetry.io) clients and agents via gRPC.
## Service Input <!-- @/docs/includes/service_input.md -->
This plugin is a service input. Normal plugins gather metrics determined by the
interval setting. Service plugins start a service to listen and wait for
metrics or events to occur. Service plugins have two key differences from
normal plugins:
1. The global or plugin specific `interval` setting may not apply
2. The CLI options of `--test`, `--test-wait`, and `--once` may not produce
output for this plugin
## Global configuration options <!-- @/docs/includes/plugin_config.md -->
In addition to the plugin-specific configuration settings, plugins support
additional global and plugin configuration settings. These settings are used to
modify metrics, tags, and fields, or create aliases and configure ordering, etc.
See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins
## Configuration
```toml @sample.conf
# Receive OpenTelemetry traces, metrics, and logs over gRPC
[[inputs.opentelemetry]]
## Override the default (0.0.0.0:4317) destination OpenTelemetry gRPC service
## address:port
# service_address = "0.0.0.0:4317"
## Override the default (5s) new connection timeout
# timeout = "5s"
## gRPC Maximum Message Size
# max_msg_size = "4MB"
## Override the default span attributes to be used as line protocol tags.
## These are always included as tags:
## - trace ID
## - span ID
## Common attributes can be found here:
## - https://github.com/open-telemetry/opentelemetry-collector/tree/main/semconv
# span_dimensions = ["service.name", "span.name"]
## Override the default log record attributes to be used as line protocol tags.
## These are always included as tags, if available:
## - trace ID
## - span ID
## Common attributes can be found here:
## - https://github.com/open-telemetry/opentelemetry-collector/tree/main/semconv
## When using InfluxDB for both logs and traces, be certain that log_record_dimensions
## matches the span_dimensions value.
# log_record_dimensions = ["service.name"]
## Override the default profile attributes to be used as line protocol tags.
## These are always included as tags, if available:
## - profile_id
## - address
## - sample
## - sample_name
## - sample_unit
## - sample_type
## - sample_type_unit
## Common attributes can be found here:
## - https://github.com/open-telemetry/opentelemetry-collector/tree/main/semconv
# profile_dimensions = []
## Override the default (prometheus-v1) metrics schema.
## Supports: "prometheus-v1", "prometheus-v2"
## For more information about the alternatives, read the Prometheus input
## plugin notes.
# metrics_schema = "prometheus-v1"
## Optional TLS Config.
## For advanced options: https://github.com/influxdata/telegraf/blob/v1.18.3/docs/TLS.md
##
## Set one or more allowed client CA certificate file names to
## enable mutually authenticated TLS connections.
# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
## Add service certificate and key.
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
```
### Schema
The OpenTelemetry->InfluxDB conversion [schema][1] and [implementation][2] are
hosted at <https://github.com/influxdata/influxdb-observability> .
Spans are stored in measurement `spans`.
Logs are stored in measurement `logs`.
For metrics, two output schemata exist. Metrics received with
`metrics_schema=prometheus-v1` are assigned measurement from the OTel field
`Metric.name`. Metrics received with `metrics_schema=prometheus-v2` are stored
in measurement `prometheus`.
Also see the OpenTelemetry output plugin for Telegraf.
[1]: https://github.com/influxdata/influxdb-observability/blob/main/docs/index.md
[2]: https://github.com/influxdata/influxdb-observability/tree/main/otel2influx
## Example Output
### Tracing Spans
```text
spans end_time_unix_nano="2021-02-19 20:50:25.6893952 +0000 UTC",instrumentation_library_name="tracegen",kind="SPAN_KIND_INTERNAL",name="okey-dokey",net.peer.ip="1.2.3.4",parent_span_id="d5270e78d85f570f",peer.service="tracegen-client",service.name="tracegen",span.kind="server",span_id="4c28227be6a010e1",status_code="STATUS_CODE_OK",trace_id="7d4854815225332c9834e6dbf85b9380" 1613767825689169000
spans end_time_unix_nano="2021-02-19 20:50:25.6893952 +0000 UTC",instrumentation_library_name="tracegen",kind="SPAN_KIND_INTERNAL",name="lets-go",net.peer.ip="1.2.3.4",peer.service="tracegen-server",service.name="tracegen",span.kind="client",span_id="d5270e78d85f570f",status_code="STATUS_CODE_OK",trace_id="7d4854815225332c9834e6dbf85b9380" 1613767825689135000
spans end_time_unix_nano="2021-02-19 20:50:25.6895667 +0000 UTC",instrumentation_library_name="tracegen",kind="SPAN_KIND_INTERNAL",name="okey-dokey",net.peer.ip="1.2.3.4",parent_span_id="b57e98af78c3399b",peer.service="tracegen-client",service.name="tracegen",span.kind="server",span_id="a0643a156d7f9f7f",status_code="STATUS_CODE_OK",trace_id="fd6b8bb5965e726c94978c644962cdc8" 1613767825689388000
spans end_time_unix_nano="2021-02-19 20:50:25.6895667 +0000 UTC",instrumentation_library_name="tracegen",kind="SPAN_KIND_INTERNAL",name="lets-go",net.peer.ip="1.2.3.4",peer.service="tracegen-server",service.name="tracegen",span.kind="client",span_id="b57e98af78c3399b",status_code="STATUS_CODE_OK",trace_id="fd6b8bb5965e726c94978c644962cdc8" 1613767825689303300
spans end_time_unix_nano="2021-02-19 20:50:25.6896741 +0000 UTC",instrumentation_library_name="tracegen",kind="SPAN_KIND_INTERNAL",name="okey-dokey",net.peer.ip="1.2.3.4",parent_span_id="6a8e6a0edcc1c966",peer.service="tracegen-client",service.name="tracegen",span.kind="server",span_id="d68f7f3b41eb8075",status_code="STATUS_CODE_OK",trace_id="651dadde186b7834c52b13a28fc27bea" 1613767825689480300
```
## Metrics
### `prometheus-v1`
```text
cpu_temp,foo=bar gauge=87.332
http_requests_total,method=post,code=200 counter=1027
http_requests_total,method=post,code=400 counter=3
http_request_duration_seconds 0.05=24054,0.1=33444,0.2=100392,0.5=129389,1=133988,sum=53423,count=144320
rpc_duration_seconds 0.01=3102,0.05=3272,0.5=4773,0.9=9001,0.99=76656,sum=1.7560473e+07,count=2693
```
### `prometheus-v2`
```text
prometheus,foo=bar cpu_temp=87.332
prometheus,method=post,code=200 http_requests_total=1027
prometheus,method=post,code=400 http_requests_total=3
prometheus,le=0.05 http_request_duration_seconds_bucket=24054
prometheus,le=0.1 http_request_duration_seconds_bucket=33444
prometheus,le=0.2 http_request_duration_seconds_bucket=100392
prometheus,le=0.5 http_request_duration_seconds_bucket=129389
prometheus,le=1 http_request_duration_seconds_bucket=133988
prometheus http_request_duration_seconds_count=144320,http_request_duration_seconds_sum=53423
prometheus,quantile=0.01 rpc_duration_seconds=3102
prometheus,quantile=0.05 rpc_duration_seconds=3272
prometheus,quantile=0.5 rpc_duration_seconds=4773
prometheus,quantile=0.9 rpc_duration_seconds=9001
prometheus,quantile=0.99 rpc_duration_seconds=76656
prometheus rpc_duration_seconds_count=1.7560473e+07,rpc_duration_seconds_sum=2693
```
### Logs
```text
logs fluent.tag="fluent.info",pid=18i,ppid=9i,worker=0i 1613769568895331700
logs fluent.tag="fluent.debug",instance=1720i,queue_size=0i,stage_size=0i 1613769568895697200
logs fluent.tag="fluent.info",worker=0i 1613769568896515100
```
### Profiles
```text
profiles,address=95210353,host.name=testbox,profile_id=618098d29a6cefd6a4c0ea806880c2a8,sample=0,sample_name=cpu,sample_type=samples,sample_type_unit=count,sample_unit=nanoseconds build_id="fab9b8c848218405738c11a7ec4982e9",build_id_type="BUILD_ID_BINARY_HASH",end_time_unix_nano=1721306050081621681u,file_offset=18694144u,filename="chromium",frame_type="native",location="",memory_limit=250413056u,memory_start=18698240u,stack_trace_id="hYmAzQVF8vy8MWbzsKpQNw",start_time_unix_nano=1721306050081621681u,value=1i 1721306048731622020
profiles,address=15945263,host.name=testbox,profile_id=618098d29a6cefd6a4c0ea806880c2a8,sample=1,sample_name=cpu,sample_type=samples,sample_type_unit=count,sample_unit=nanoseconds build_id="7dab4a2e0005d025e75cc72191f8d6bf",build_id_type="BUILD_ID_BINARY_HASH",end_time_unix_nano=1721306050081621681u,file_offset=15638528u,filename="dockerd",frame_type="native",location="",memory_limit=47255552u,memory_start=15638528u,stack_trace_id="4N3KEcGylb5Qoi2905c1ZA",start_time_unix_nano=1721306050081621681u,value=1i 1721306049831718725
profiles,address=15952400,host.name=testbox,profile_id=618098d29a6cefd6a4c0ea806880c2a8,sample=1,sample_name=cpu,sample_type=samples,sample_type_unit=count,sample_unit=nanoseconds build_id="7dab4a2e0005d025e75cc72191f8d6bf",build_id_type="BUILD_ID_BINARY_HASH",end_time_unix_nano=1721306050081621681u,file_offset=15638528u,filename="dockerd",frame_type="native",location="",memory_limit=47255552u,memory_start=15638528u,stack_trace_id="4N3KEcGylb5Qoi2905c1ZA",start_time_unix_nano=1721306050081621681u,value=1i 1721306049831718725
profiles,address=15953899,host.name=testbox,profile_id=618098d29a6cefd6a4c0ea806880c2a8,sample=1,sample_name=cpu,sample_type=samples,sample_type_unit=count,sample_unit=nanoseconds build_id="7dab4a2e0005d025e75cc72191f8d6bf",build_id_type="BUILD_ID_BINARY_HASH",end_time_unix_nano=1721306050081621681u,file_offset=15638528u,filename="dockerd",frame_type="native",location="",memory_limit=47255552u,memory_start=15638528u,stack_trace_id="4N3KEcGylb5Qoi2905c1ZA",start_time_unix_nano=1721306050081621681u,value=1i 1721306049831718725
profiles,address=16148175,host.name=testbox,profile_id=618098d29a6cefd6a4c0ea806880c2a8,sample=1,sample_name=cpu,sample_type=samples,sample_type_unit=count,sample_unit=nanoseconds build_id="7dab4a2e0005d025e75cc72191f8d6bf",build_id_type="BUILD_ID_BINARY_HASH",end_time_unix_nano=1721306050081621681u,file_offset=15638528u,filename="dockerd",frame_type="native",location="",memory_limit=47255552u,memory_start=15638528u,stack_trace_id="4N3KEcGylb5Qoi2905c1ZA",start_time_unix_nano=1721306050081621681u,value=1i 1721306049831718725
profiles,address=4770577,host.name=testbox,profile_id=618098d29a6cefd6a4c0ea806880c2a8,sample=2,sample_name=cpu,sample_type=samples,sample_type_unit=count,sample_unit=nanoseconds build_id="cfc3dc7d1638c1284a6b62d4b5c0d74e",build_id_type="BUILD_ID_BINARY_HASH",end_time_unix_nano=1721306050081621681u,file_offset=0u,filename="",frame_type="kernel",location="do_epoll_wait",memory_limit=0u,memory_start=0u,stack_trace_id="UaO9bysJnAYXFYobSdHXqg",start_time_unix_nano=1721306050081621681u,value=1i 1721306050081621681
profiles,address=4773632,host.name=testbox,profile_id=618098d29a6cefd6a4c0ea806880c2a8,sample=2,sample_name=cpu,sample_type=samples,sample_type_unit=count,sample_unit=nanoseconds build_id="cfc3dc7d1638c1284a6b62d4b5c0d74e",build_id_type="BUILD_ID_BINARY_HASH",end_time_unix_nano=1721306050081621681u,file_offset=0u,filename="",frame_type="kernel",location="__x64_sys_epoll_wait",memory_limit=0u,memory_start=0u,stack_trace_id="UaO9bysJnAYXFYobSdHXqg",start_time_unix_nano=1721306050081621681u,value=1i 1721306050081621681
profiles,address=14783666,host.name=testbox,profile_id=618098d29a6cefd6a4c0ea806880c2a8,sample=2,sample_name=cpu,sample_type=samples,sample_type_unit=count,sample_unit=nanoseconds build_id="cfc3dc7d1638c1284a6b62d4b5c0d74e",build_id_type="BUILD_ID_BINARY_HASH",end_time_unix_nano=1721306050081621681u,file_offset=0u,filename="",frame_type="kernel",location="do_syscall_64",memory_limit=0u,memory_start=0u,stack_trace_id="UaO9bysJnAYXFYobSdHXqg",start_time_unix_nano=1721306050081621681u,value=1i 1721306050081621681
profiles,address=16777518,host.name=testbox,profile_id=618098d29a6cefd6a4c0ea806880c2a8,sample=2,sample_name=cpu,sample_type=samples,sample_type_unit=count,sample_unit=nanoseconds build_id="cfc3dc7d1638c1284a6b62d4b5c0d74e",build_id_type="BUILD_ID_BINARY_HASH",end_time_unix_nano=1721306050081621681u,file_offset=0u,filename="",frame_type="kernel",location="entry_SYSCALL_64_after_hwframe",memory_limit=0u,memory_start=0u,stack_trace_id="UaO9bysJnAYXFYobSdHXqg",start_time_unix_nano=1721306050081621681u,value=1i 1721306050081621681
profiles,address=1139937,host.name=testbox,profile_id=618098d29a6cefd6a4c0ea806880c2a8,sample=2,sample_name=cpu,sample_type=samples,sample_type_unit=count,sample_unit=nanoseconds build_id="982ed6c7a77f99f0ae746be0187953bf",build_id_type="BUILD_ID_BINARY_HASH",end_time_unix_nano=1721306050081621681u,file_offset=147456u,filename="libc.so.6",frame_type="native",location="",memory_limit=1638400u,memory_start=147456u,stack_trace_id="UaO9bysJnAYXFYobSdHXqg",start_time_unix_nano=1721306050081621681u,value=1i 1721306050081621681
profiles,address=117834912,host.name=testbox,profile_id=618098d29a6cefd6a4c0ea806880c2a8,sample=2,sample_name=cpu,sample_type=samples,sample_type_unit=count,sample_unit=nanoseconds build_id="fab9b8c848218405738c11a7ec4982e9",build_id_type="BUILD_ID_BINARY_HASH",end_time_unix_nano=1721306050081621681u,file_offset=18694144u,filename="chromium",frame_type="native",location="",memory_limit=250413056u,memory_start=18698240u,stack_trace_id="UaO9bysJnAYXFYobSdHXqg",start_time_unix_nano=1721306050081621681u,value=1i 1721306050081621681
```

View file

@ -0,0 +1,135 @@
package opentelemetry
import (
"context"
"encoding/hex"
"fmt"
"strconv"
"strings"
"time"
service "go.opentelemetry.io/proto/otlp/collector/profiles/v1experimental"
"google.golang.org/protobuf/encoding/protojson"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/filter"
)
// profileService implements the experimental OTLP profiles gRPC service and
// converts received profiles into Telegraf metrics.
type profileService struct {
	service.UnimplementedProfilesServiceServer

	acc    telegraf.Accumulator // sink for the generated "profiles" metrics
	filter filter.Filter        // selects resource attributes to emit as tags
	logger telegraf.Logger
}
// newProfileService creates a profiles service writing to the given
// accumulator. The dimensions are the resource-attribute keys to expose as
// tags; listing the same key twice is a configuration error.
func newProfileService(acc telegraf.Accumulator, logger telegraf.Logger, dimensions []string) (*profileService, error) {
	// Split the configured dimensions into unique keys and duplicates.
	var dups []string
	known := make(map[string]bool, len(dimensions))
	unique := make([]string, 0, len(dimensions))
	for _, dim := range dimensions {
		if known[dim] {
			dups = append(dups, dim)
		} else {
			known[dim] = true
			unique = append(unique, dim)
		}
	}
	if len(dups) > 0 {
		return nil, fmt.Errorf("duplicate profile dimension(s) configured: %s", strings.Join(dups, ","))
	}

	f, err := filter.Compile(unique)
	if err != nil {
		return nil, fmt.Errorf("compiling dimensions filter failed: %w", err)
	}

	return &profileService{
		acc:    acc,
		filter: f,
		logger: logger,
	}, nil
}
// Export processes and exports the received profile data.
// One metric is emitted into measurement "profiles" per
// (sample, location, value) combination found in the request.
func (s *profileService) Export(_ context.Context, req *service.ExportProfilesServiceRequest) (*service.ExportProfilesServiceResponse, error) {
	// Output the received message for debugging
	buf, err := protojson.Marshal(req)
	if err != nil {
		s.logger.Errorf("marshalling received profile failed: %v", err)
	} else {
		s.logger.Debugf("received profile: %s", string(buf))
	}
	for _, rp := range req.ResourceProfiles {
		// Extract the requested attributes that should be added as tags
		attrtags := make(map[string]string)
		for _, attr := range rp.Resource.Attributes {
			if s.filter.Match(attr.Key) {
				attrtags[attr.Key] = attr.GetValue().GetStringValue()
			}
		}
		for _, sp := range rp.ScopeProfiles {
			for _, p := range sp.Profiles {
				for i, sample := range p.Profile.Sample {
					// Walk the sample's slice of the location table.
					for j := sample.LocationsStartIndex; j < sample.LocationsStartIndex+sample.LocationsLength; j++ {
						for validx, value := range sample.Value {
							loc := p.Profile.Location[j]
							// Render each line of the location as
							// "name(filename line N)"; names and filenames are
							// indices into the profile's string table.
							locations := make([]string, 0, len(loc.Line))
							for _, line := range loc.Line {
								f := p.Profile.Function[line.FunctionIndex]
								fileloc := p.Profile.StringTable[f.Filename]
								if f.StartLine > 0 {
									if fileloc != "" {
										fileloc += " "
									}
									fileloc += "line " + strconv.FormatInt(f.StartLine, 10)
								}
								l := p.Profile.StringTable[f.Name]
								if fileloc != "" {
									l += "(" + fileloc + ")"
								}
								locations = append(locations, l)
							}
							mapping := p.Profile.Mapping[loc.MappingIndex]
							tags := map[string]string{
								"profile_id":       hex.EncodeToString(p.ProfileId),
								"sample":           strconv.Itoa(i),
								"sample_name":      p.Profile.StringTable[p.Profile.PeriodType.Type],
								"sample_unit":      p.Profile.StringTable[p.Profile.PeriodType.Unit],
								"sample_type":      p.Profile.StringTable[p.Profile.SampleType[validx].Type],
								"sample_type_unit": p.Profile.StringTable[p.Profile.SampleType[validx].Unit],
								"address":          "0x" + strconv.FormatUint(loc.Address, 16),
							}
							// Resource-attribute tags selected by the filter.
							for k, v := range attrtags {
								tags[k] = v
							}
							fields := map[string]interface{}{
								"start_time_unix_nano": p.StartTimeUnixNano,
								"end_time_unix_nano":   p.EndTimeUnixNano,
								"location":             strings.Join(locations, ","),
								"frame_type":           p.Profile.StringTable[loc.TypeIndex],
								"stack_trace_id":       p.Profile.StringTable[sample.StacktraceIdIndex],
								"memory_start":         mapping.MemoryStart,
								"memory_limit":         mapping.MemoryLimit,
								"filename":             p.Profile.StringTable[mapping.Filename],
								"file_offset":          mapping.FileOffset,
								"build_id":             p.Profile.StringTable[mapping.BuildId],
								"build_id_type":        mapping.BuildIdKind.String(),
								"value":                value,
							}
							// Per-sample attributes become additional fields.
							for _, idx := range sample.Attributes {
								attr := p.Profile.AttributeTable[idx]
								fields[attr.Key] = attr.GetValue().Value
							}
							// NOTE(review): assumes TimestampsUnixNano has one
							// entry per value (index validx) — a malformed
							// request with fewer timestamps would panic here;
							// confirm the upstream contract.
							ts := sample.TimestampsUnixNano[validx]
							s.acc.AddFields("profiles", fields, tags, time.Unix(0, int64(ts)))
						}
					}
				}
			}
		}
	}
	return &service.ExportProfilesServiceResponse{}, nil
}

View file

@ -0,0 +1,103 @@
package opentelemetry
import (
"context"
"fmt"
"github.com/influxdata/influxdb-observability/common"
"github.com/influxdata/influxdb-observability/otel2influx"
"go.opentelemetry.io/collector/pdata/plog/plogotlp"
"go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
"go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp"
)
// traceService implements the OTLP traces gRPC service by delegating the
// conversion to line protocol to the otel2influx exporter.
type traceService struct {
	ptraceotlp.UnimplementedGRPCServer
	exporter *otel2influx.OtelTracesToLineProtocol
}

// Compile-time check that the full gRPC server interface is implemented.
var _ ptraceotlp.GRPCServer = (*traceService)(nil)
// newTraceService builds a trace service that converts spans to line
// protocol and writes them through the given accumulator writer. The span
// dimensions select which span attributes become tags.
func newTraceService(logger common.Logger, writer *writeToAccumulator, spanDimensions []string) (*traceService, error) {
	cfg := otel2influx.DefaultOtelTracesToLineProtocolConfig()
	cfg.Logger = logger
	cfg.Writer = writer
	cfg.SpanDimensions = spanDimensions

	exporter, err := otel2influx.NewOtelTracesToLineProtocol(cfg)
	if err != nil {
		return nil, err
	}
	return &traceService{exporter: exporter}, nil
}
// Export processes and exports the trace data received in the request.
func (s *traceService) Export(ctx context.Context, req ptraceotlp.ExportRequest) (ptraceotlp.ExportResponse, error) {
	resp := ptraceotlp.NewExportResponse()
	if err := s.exporter.WriteTraces(ctx, req.Traces()); err != nil {
		return resp, err
	}
	return resp, nil
}
// metricsService implements the OTLP metrics gRPC service by delegating the
// conversion to line protocol to the otel2influx exporter.
type metricsService struct {
	pmetricotlp.UnimplementedGRPCServer
	exporter *otel2influx.OtelMetricsToLineProtocol
}

// Compile-time check that the full gRPC server interface is implemented.
var _ pmetricotlp.GRPCServer = (*metricsService)(nil)

// metricsSchemata maps the user-facing `metrics_schema` config values to the
// otel2influx schema constants.
var metricsSchemata = map[string]common.MetricsSchema{
	"prometheus-v1": common.MetricsSchemaTelegrafPrometheusV1,
	"prometheus-v2": common.MetricsSchemaTelegrafPrometheusV2,
}
// newMetricsService builds a metrics service for the given schema name
// ("prometheus-v1" or "prometheus-v2"); an unknown name is an error.
func newMetricsService(logger common.Logger, writer *writeToAccumulator, schema string) (*metricsService, error) {
	ms, ok := metricsSchemata[schema]
	if !ok {
		return nil, fmt.Errorf("schema %q not recognized", schema)
	}

	cfg := otel2influx.DefaultOtelMetricsToLineProtocolConfig()
	cfg.Logger = logger
	cfg.Writer = writer
	cfg.Schema = ms

	exporter, err := otel2influx.NewOtelMetricsToLineProtocol(cfg)
	if err != nil {
		return nil, err
	}
	return &metricsService{exporter: exporter}, nil
}
// Export processes and exports the metrics data received in the request.
func (s *metricsService) Export(ctx context.Context, req pmetricotlp.ExportRequest) (pmetricotlp.ExportResponse, error) {
	resp := pmetricotlp.NewExportResponse()
	if err := s.exporter.WriteMetrics(ctx, req.Metrics()); err != nil {
		return resp, err
	}
	return resp, nil
}
// logsService implements the OTLP logs gRPC service by delegating the
// conversion to line protocol to the otel2influx exporter.
type logsService struct {
	plogotlp.UnimplementedGRPCServer
	// NOTE(review): named "converter" while the sibling services use
	// "exporter" — consider unifying the field name.
	converter *otel2influx.OtelLogsToLineProtocol
}

// Compile-time check that the full gRPC server interface is implemented.
var _ plogotlp.GRPCServer = (*logsService)(nil)
// newLogsService builds a logs service that converts log records to line
// protocol and writes them through the given accumulator writer. The log
// record dimensions select which attributes become tags.
func newLogsService(logger common.Logger, writer *writeToAccumulator, logRecordDimensions []string) (*logsService, error) {
	cfg := otel2influx.DefaultOtelLogsToLineProtocolConfig()
	cfg.Logger = logger
	cfg.Writer = writer
	cfg.LogRecordDimensions = logRecordDimensions

	converter, err := otel2influx.NewOtelLogsToLineProtocol(cfg)
	if err != nil {
		return nil, err
	}
	return &logsService{converter: converter}, nil
}
// Export processes and exports the logs data received in the request.
func (s *logsService) Export(ctx context.Context, req plogotlp.ExportRequest) (plogotlp.ExportResponse, error) {
	resp := plogotlp.NewExportResponse()
	if err := s.converter.WriteLogs(ctx, req.Logs()); err != nil {
		return resp, err
	}
	return resp, nil
}

View file

@ -0,0 +1,17 @@
package opentelemetry
import (
"strings"
"github.com/influxdata/telegraf"
)
// otelLogger adapts a telegraf.Logger to the logging interface expected by
// the otel2influx exporters (message plus key/value pairs).
type otelLogger struct {
	telegraf.Logger
}
// Debug logs a debug message, patterned after log.Print. The variadic
// arguments are treated as key/value pairs and appended as ` key="value"`;
// a trailing unpaired element is dropped.
func (l otelLogger) Debug(msg string, kv ...interface{}) {
	var format strings.Builder
	format.WriteString(msg)
	for i := 0; i < len(kv)/2; i++ {
		format.WriteString(" %s=%q")
	}
	l.Logger.Debugf(format.String(), kv...)
}

View file

@ -0,0 +1,143 @@
//go:generate ../../../tools/readme_config_includer/generator
package opentelemetry
import (
_ "embed"
"fmt"
"net"
"sync"
"time"
"github.com/influxdata/influxdb-observability/otel2influx"
"go.opentelemetry.io/collector/pdata/plog/plogotlp"
"go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
"go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp"
pprofileotlp "go.opentelemetry.io/proto/otlp/collector/profiles/v1experimental"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
//go:embed sample.conf
var sampleConfig string
// OpenTelemetry is a service input receiving traces, metrics, logs and
// experimental profiles from OTLP clients over gRPC.
type OpenTelemetry struct {
	ServiceAddress      string          `toml:"service_address"`       // listen address, defaults to 0.0.0.0:4317 in Init
	SpanDimensions      []string        `toml:"span_dimensions"`       // span attributes emitted as tags
	LogRecordDimensions []string        `toml:"log_record_dimensions"` // log-record attributes emitted as tags
	ProfileDimensions   []string        `toml:"profile_dimensions"`    // profile resource attributes emitted as tags
	MetricsSchema       string          `toml:"metrics_schema"`        // "prometheus-v1" (default) or "prometheus-v2"
	MaxMsgSize          config.Size     `toml:"max_msg_size"`          // gRPC maximum receive message size
	Timeout             config.Duration `toml:"timeout"`               // new-connection timeout
	Log                 telegraf.Logger `toml:"-"`
	tls.ServerConfig

	listener   net.Listener // overridden in tests
	grpcServer *grpc.Server
	wg         sync.WaitGroup // tracks the Serve goroutine started in Start
}
// SampleConfig returns the embedded sample configuration (sample.conf).
func (*OpenTelemetry) SampleConfig() string {
	return sampleConfig
}
// Init applies defaults (listen address, metrics schema) and validates the
// configured metrics schema.
func (o *OpenTelemetry) Init() error {
	if o.ServiceAddress == "" {
		o.ServiceAddress = "0.0.0.0:4317"
	}

	if o.MetricsSchema == "" {
		// Set default
		o.MetricsSchema = "prometheus-v1"
	}
	if o.MetricsSchema != "prometheus-v1" && o.MetricsSchema != "prometheus-v2" {
		return fmt.Errorf("invalid metric schema %q", o.MetricsSchema)
	}

	return nil
}
// Start builds the gRPC server options from the configuration, registers the
// trace, metrics, logs and profile services, opens the TCP listener and
// serves in a background goroutine tracked by o.wg.
func (o *OpenTelemetry) Start(acc telegraf.Accumulator) error {
	var grpcOptions []grpc.ServerOption
	// Enable TLS only when a server TLS configuration is present.
	if tlsConfig, err := o.ServerConfig.TLSConfig(); err != nil {
		return err
	} else if tlsConfig != nil {
		grpcOptions = append(grpcOptions, grpc.Creds(credentials.NewTLS(tlsConfig)))
	}
	if o.Timeout > 0 {
		grpcOptions = append(grpcOptions, grpc.ConnectionTimeout(time.Duration(o.Timeout)))
	}
	if o.MaxMsgSize > 0 {
		grpcOptions = append(grpcOptions, grpc.MaxRecvMsgSize(int(o.MaxMsgSize)))
	}
	logger := &otelLogger{o.Log}
	influxWriter := &writeToAccumulator{acc}
	o.grpcServer = grpc.NewServer(grpcOptions...)
	// Register all four services before the listener starts serving.
	traceSvc, err := newTraceService(logger, influxWriter, o.SpanDimensions)
	if err != nil {
		return err
	}
	ptraceotlp.RegisterGRPCServer(o.grpcServer, traceSvc)
	metricsSvc, err := newMetricsService(logger, influxWriter, o.MetricsSchema)
	if err != nil {
		return err
	}
	pmetricotlp.RegisterGRPCServer(o.grpcServer, metricsSvc)
	logsSvc, err := newLogsService(logger, influxWriter, o.LogRecordDimensions)
	if err != nil {
		return err
	}
	plogotlp.RegisterGRPCServer(o.grpcServer, logsSvc)
	profileSvc, err := newProfileService(acc, o.Log, o.ProfileDimensions)
	if err != nil {
		return err
	}
	pprofileotlp.RegisterProfilesServiceServer(o.grpcServer, profileSvc)
	o.listener, err = net.Listen("tcp", o.ServiceAddress)
	if err != nil {
		return err
	}
	o.wg.Add(1)
	go func() {
		defer o.wg.Done()
		// Serve blocks until Stop() is called or serving fails; a failure is
		// surfaced through the accumulator.
		if err := o.grpcServer.Serve(o.listener); err != nil {
			acc.AddError(fmt.Errorf("failed to stop OpenTelemetry gRPC service: %w", err))
		}
	}()
	return nil
}
// Gather is a no-op: this is a service input, data is pushed into the
// accumulator by the gRPC services started in Start.
func (*OpenTelemetry) Gather(telegraf.Accumulator) error {
	return nil
}
// Stop shuts down the gRPC server (which also closes the listener) and waits
// for the serving goroutine to finish.
func (o *OpenTelemetry) Stop() {
	if server := o.grpcServer; server != nil {
		server.Stop()
	}
	o.listener = nil
	o.wg.Wait()
}
// init registers the plugin with Telegraf's input registry, pre-seeding the
// span/log dimension defaults from otel2influx and a 5s connection timeout.
func init() {
	inputs.Add("opentelemetry", func() telegraf.Input {
		return &OpenTelemetry{
			SpanDimensions:      otel2influx.DefaultOtelTracesToLineProtocolConfig().SpanDimensions,
			LogRecordDimensions: otel2influx.DefaultOtelLogsToLineProtocolConfig().LogRecordDimensions,
			Timeout:             config.Duration(5 * time.Second),
		}
	})
}

View file

@ -0,0 +1,256 @@
package opentelemetry
import (
"context"
"net"
"os"
"path/filepath"
"runtime"
"sort"
"strings"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/influxdata/influxdb-observability/otel2influx"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
"go.opentelemetry.io/otel/sdk/metric"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
otlplogs "go.opentelemetry.io/proto/otlp/collector/logs/v1"
otlpmetrics "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
otlpprofiles "go.opentelemetry.io/proto/otlp/collector/profiles/v1experimental"
otlptrace "go.opentelemetry.io/proto/otlp/collector/trace/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
"google.golang.org/protobuf/encoding/protojson"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/parsers/influx"
"github.com/influxdata/telegraf/testutil"
)
// TestOpenTelemetry runs the plugin end-to-end: it starts the gRPC service,
// exports one counter metric through the official OTel Go SDK exporter and
// checks the resulting Telegraf metric.
func TestOpenTelemetry(t *testing.T) {
	// Setup and start the plugin
	plugin := &OpenTelemetry{
		MetricsSchema: "prometheus-v1",
	}
	require.NoError(t, plugin.Init())
	var acc testutil.Accumulator
	require.NoError(t, plugin.Start(&acc))
	defer plugin.Stop()
	// Setup the OpenTelemetry exporter; the custom dialer connects straight
	// to the plugin's listener address.
	ctx, cancel := context.WithTimeout(t.Context(), time.Second)
	defer cancel()
	exporter, err := otlpmetricgrpc.New(ctx,
		otlpmetricgrpc.WithInsecure(),
		otlpmetricgrpc.WithDialOption(
			grpc.WithContextDialer(func(context.Context, string) (net.Conn, error) {
				return net.Dial("tcp", plugin.listener.Addr().String())
			})),
	)
	require.NoError(t, err)
	defer exporter.Shutdown(ctx) //nolint:errcheck // We cannot do anything if the shutdown fails
	// Setup the metric to send
	reader := metric.NewManualReader()
	defer reader.Shutdown(ctx) //nolint:errcheck // We cannot do anything if the shutdown fails
	provider := metric.NewMeterProvider(metric.WithReader(reader))
	meter := provider.Meter("library-name")
	counter, err := meter.Int64Counter("measurement-counter")
	require.NoError(t, err)
	counter.Add(ctx, 7)
	// Write the OpenTelemetry metrics
	var rm metricdata.ResourceMetrics
	require.NoError(t, reader.Collect(ctx, &rm))
	require.NoError(t, exporter.Export(ctx, &rm))
	// Shutdown
	require.NoError(t, reader.Shutdown(ctx))
	require.NoError(t, exporter.Shutdown(ctx))
	plugin.Stop()
	// Check
	require.Empty(t, acc.Errors)
	// The SDK derives service.name from the test binary name, which carries
	// an .exe suffix on Windows.
	var exesuffix string
	if runtime.GOOS == "windows" {
		exesuffix = ".exe"
	}
	expected := []telegraf.Metric{
		testutil.MustMetric(
			"measurement-counter",
			map[string]string{
				"otel.library.name":      "library-name",
				"service.name":           "unknown_service:opentelemetry.test" + exesuffix,
				"telemetry.sdk.language": "go",
				"telemetry.sdk.name":     "opentelemetry",
				"telemetry.sdk.version":  "1.27.0",
			},
			map[string]interface{}{
				"counter": 7,
			},
			time.Unix(0, 0),
			telegraf.Counter,
		),
	}
	// Ignore volatile parts: timestamps, start time and the SDK version tag.
	options := []cmp.Option{
		testutil.IgnoreTime(),
		testutil.IgnoreFields("start_time_unix_nano"),
		testutil.IgnoreTags("telemetry.sdk.version"),
	}
	actual := acc.GetTelegrafMetrics()
	testutil.RequireMetricsEqual(t, expected, actual, options...)
}
// TestCases runs one sub-test per directory in testcases/. Each directory
// provides a telegraf.conf, OTLP request payloads as *.json (the filename
// prefix before "_" selects the message type: logs, metrics, profiles or
// traces) and optional expected.out / expected.err files.
func TestCases(t *testing.T) {
	// Get all directories in testdata
	folders, err := os.ReadDir("testcases")
	require.NoError(t, err)
	// Register the plugin
	inputs.Add("opentelemetry", func() telegraf.Input {
		return &OpenTelemetry{
			ServiceAddress:      "127.0.0.1:0",
			SpanDimensions:      otel2influx.DefaultOtelTracesToLineProtocolConfig().SpanDimensions,
			LogRecordDimensions: otel2influx.DefaultOtelLogsToLineProtocolConfig().LogRecordDimensions,
			ProfileDimensions:   []string{"host.name"},
			Timeout:             config.Duration(5 * time.Second),
		}
	})
	// Prepare the influx parser for expectations
	parser := &influx.Parser{}
	require.NoError(t, parser.Init())
	for _, f := range folders {
		// Only handle folders
		if !f.IsDir() {
			continue
		}
		testcasePath := filepath.Join("testcases", f.Name())
		configFilename := filepath.Join(testcasePath, "telegraf.conf")
		inputFiles := filepath.Join(testcasePath, "*.json")
		expectedFilename := filepath.Join(testcasePath, "expected.out")
		expectedErrorFilename := filepath.Join(testcasePath, "expected.err")
		// Compare options
		options := []cmp.Option{
			testutil.IgnoreTime(),
			testutil.SortMetrics(),
			testutil.IgnoreFields("start_time_unix_nano"),
		}
		t.Run(f.Name(), func(t *testing.T) {
			// Read the input data, grouped by message type (the part of the
			// filename before the first underscore).
			inputs := make(map[string][][]byte)
			matches, err := filepath.Glob(inputFiles)
			require.NoError(t, err)
			require.NotEmpty(t, matches)
			sort.Strings(matches)
			for _, fn := range matches {
				buf, err := os.ReadFile(fn)
				require.NoError(t, err)
				key := strings.TrimSuffix(filepath.Base(fn), ".json")
				key, _, _ = strings.Cut(key, "_")
				inputs[key] = append(inputs[key], buf)
			}
			// Read the expected output if any
			var expected []telegraf.Metric
			if _, err := os.Stat(expectedFilename); err == nil {
				var err error
				expected, err = testutil.ParseMetricsFromFile(expectedFilename, parser)
				require.NoError(t, err)
			}
			// Read the expected errors if any
			var expectedErrors []string
			if _, err := os.Stat(expectedErrorFilename); err == nil {
				var err error
				expectedErrors, err = testutil.ParseLinesFromFile(expectedErrorFilename)
				require.NoError(t, err)
				require.NotEmpty(t, expectedErrors)
			}
			// Configure the plugin
			cfg := config.NewConfig()
			require.NoError(t, cfg.LoadConfig(configFilename))
			require.Len(t, cfg.Inputs, 1)
			// Setup and start the plugin
			plugin := cfg.Inputs[0].Input.(*OpenTelemetry)
			require.NoError(t, plugin.Init())
			var acc testutil.Accumulator
			require.NoError(t, plugin.Start(&acc))
			defer plugin.Stop()
			// Send all data to the plugin via a client connected to the
			// ephemeral port chosen by the listener.
			addr := plugin.listener.Addr().String()
			ctx, cancel := context.WithTimeout(t.Context(), time.Second)
			defer cancel()
			grpcClient, err := grpc.NewClient(addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
			require.NoError(t, err)
			defer grpcClient.Close()
			for msgtype, messages := range inputs {
				switch msgtype {
				case "logs":
					client := otlplogs.NewLogsServiceClient(grpcClient)
					for _, buf := range messages {
						var msg otlplogs.ExportLogsServiceRequest
						require.NoError(t, protojson.Unmarshal(buf, &msg))
						_, err := client.Export(ctx, &msg)
						require.NoError(t, err)
					}
				case "metrics":
					client := otlpmetrics.NewMetricsServiceClient(grpcClient)
					for _, buf := range messages {
						var msg otlpmetrics.ExportMetricsServiceRequest
						require.NoError(t, protojson.Unmarshal(buf, &msg))
						_, err := client.Export(ctx, &msg)
						require.NoError(t, err)
					}
				case "profiles":
					client := otlpprofiles.NewProfilesServiceClient(grpcClient)
					for _, buf := range messages {
						var msg otlpprofiles.ExportProfilesServiceRequest
						require.NoError(t, protojson.Unmarshal(buf, &msg))
						_, err := client.Export(ctx, &msg)
						require.NoError(t, err)
					}
				case "traces":
					client := otlptrace.NewTraceServiceClient(grpcClient)
					for _, buf := range messages {
						var msg otlptrace.ExportTraceServiceRequest
						require.NoError(t, protojson.Unmarshal(buf, &msg))
						_, err := client.Export(ctx, &msg)
						require.NoError(t, err)
					}
				}
			}
			// Close the plugin to make sure all data is flushed
			require.NoError(t, grpcClient.Close())
			plugin.Stop()
			// Check the metrics; wait until the accumulator has received at
			// least the expected number before comparing.
			require.Eventually(t, func() bool {
				return acc.NMetrics() >= uint64(len(expected))
			}, 3*time.Second, 100*time.Millisecond)
			require.Empty(t, acc.Errors)
			actual := acc.GetTelegrafMetrics()
			testutil.RequireMetricsEqual(t, expected, actual, options...)
		})
	}
}

View file

@ -0,0 +1,58 @@
# Receive OpenTelemetry traces, metrics, and logs over gRPC
[[inputs.opentelemetry]]
## Override the default (0.0.0.0:4317) destination OpenTelemetry gRPC service
## address:port
# service_address = "0.0.0.0:4317"
## Override the default (5s) new connection timeout
# timeout = "5s"
## gRPC Maximum Message Size
# max_msg_size = "4MB"
## Override the default span attributes to be used as line protocol tags.
## These are always included as tags:
## - trace ID
## - span ID
## Common attributes can be found here:
## - https://github.com/open-telemetry/opentelemetry-collector/tree/main/semconv
# span_dimensions = ["service.name", "span.name"]
## Override the default log record attributes to be used as line protocol tags.
## These are always included as tags, if available:
## - trace ID
## - span ID
## Common attributes can be found here:
## - https://github.com/open-telemetry/opentelemetry-collector/tree/main/semconv
## When using InfluxDB for both logs and traces, be certain that log_record_dimensions
## matches the span_dimensions value.
# log_record_dimensions = ["service.name"]
## Override the default profile attributes to be used as line protocol tags.
## These are always included as tags, if available:
## - profile_id
## - address
## - sample
## - sample_name
## - sample_unit
## - sample_type
## - sample_type_unit
## Common attributes can be found here:
## - https://github.com/open-telemetry/opentelemetry-collector/tree/main/semconv
# profile_dimensions = []
## Override the default (prometheus-v1) metrics schema.
## Supports: "prometheus-v1", "prometheus-v2"
## For more information about the alternatives, read the Prometheus input
## plugin notes.
# metrics_schema = "prometheus-v1"
## Optional TLS Config.
## For advanced options: https://github.com/influxdata/telegraf/blob/v1.18.3/docs/TLS.md
##
## Set one or more allowed client CA certificate file names to
## enable mutually authenticated TLS connections.
# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
## Add service certificate and key.
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"

View file

@ -0,0 +1,11 @@
profiles,address=0x5accb71,host.name=Hugin,profile_id=618098d29a6cefd6a4c0ea806880c2a8,sample=0,sample_name=cpu,sample_type=samples,sample_type_unit=count,sample_unit=nanoseconds build_id="fab9b8c848218405738c11a7ec4982e9",build_id_type="BUILD_ID_BINARY_HASH",end_time_unix_nano=1721306050081621681u,file_offset=18694144u,filename="chromium",frame_type="native",location="",memory_limit=250413056u,memory_start=18698240u,stack_trace_id="hYmAzQVF8vy8MWbzsKpQNw",start_time_unix_nano=1721306050081621681u,value=1i 1721306048731622020
profiles,address=0xf34e2f,host.name=Hugin,profile_id=618098d29a6cefd6a4c0ea806880c2a8,sample=1,sample_name=cpu,sample_type=samples,sample_type_unit=count,sample_unit=nanoseconds build_id="7dab4a2e0005d025e75cc72191f8d6bf",build_id_type="BUILD_ID_BINARY_HASH",end_time_unix_nano=1721306050081621681u,file_offset=15638528u,filename="dockerd",frame_type="native",location="",memory_limit=47255552u,memory_start=15638528u,stack_trace_id="4N3KEcGylb5Qoi2905c1ZA",start_time_unix_nano=1721306050081621681u,value=1i 1721306049831718725
profiles,address=0xf36a10,host.name=Hugin,profile_id=618098d29a6cefd6a4c0ea806880c2a8,sample=1,sample_name=cpu,sample_type=samples,sample_type_unit=count,sample_unit=nanoseconds build_id="7dab4a2e0005d025e75cc72191f8d6bf",build_id_type="BUILD_ID_BINARY_HASH",end_time_unix_nano=1721306050081621681u,file_offset=15638528u,filename="dockerd",frame_type="native",location="",memory_limit=47255552u,memory_start=15638528u,stack_trace_id="4N3KEcGylb5Qoi2905c1ZA",start_time_unix_nano=1721306050081621681u,value=1i 1721306049831718725
profiles,address=0xf36feb,host.name=Hugin,profile_id=618098d29a6cefd6a4c0ea806880c2a8,sample=1,sample_name=cpu,sample_type=samples,sample_type_unit=count,sample_unit=nanoseconds build_id="7dab4a2e0005d025e75cc72191f8d6bf",build_id_type="BUILD_ID_BINARY_HASH",end_time_unix_nano=1721306050081621681u,file_offset=15638528u,filename="dockerd",frame_type="native",location="",memory_limit=47255552u,memory_start=15638528u,stack_trace_id="4N3KEcGylb5Qoi2905c1ZA",start_time_unix_nano=1721306050081621681u,value=1i 1721306049831718725
profiles,address=0xf666cf,host.name=Hugin,profile_id=618098d29a6cefd6a4c0ea806880c2a8,sample=1,sample_name=cpu,sample_type=samples,sample_type_unit=count,sample_unit=nanoseconds build_id="7dab4a2e0005d025e75cc72191f8d6bf",build_id_type="BUILD_ID_BINARY_HASH",end_time_unix_nano=1721306050081621681u,file_offset=15638528u,filename="dockerd",frame_type="native",location="",memory_limit=47255552u,memory_start=15638528u,stack_trace_id="4N3KEcGylb5Qoi2905c1ZA",start_time_unix_nano=1721306050081621681u,value=1i 1721306049831718725
profiles,address=0x48cb11,host.name=Hugin,profile_id=618098d29a6cefd6a4c0ea806880c2a8,sample=2,sample_name=cpu,sample_type=samples,sample_type_unit=count,sample_unit=nanoseconds build_id="cfc3dc7d1638c1284a6b62d4b5c0d74e",build_id_type="BUILD_ID_BINARY_HASH",end_time_unix_nano=1721306050081621681u,file_offset=0u,filename="",frame_type="kernel",location="do_epoll_wait",memory_limit=0u,memory_start=0u,stack_trace_id="UaO9bysJnAYXFYobSdHXqg",start_time_unix_nano=1721306050081621681u,value=1i 1721306050081621681
profiles,address=0x48d700,host.name=Hugin,profile_id=618098d29a6cefd6a4c0ea806880c2a8,sample=2,sample_name=cpu,sample_type=samples,sample_type_unit=count,sample_unit=nanoseconds build_id="cfc3dc7d1638c1284a6b62d4b5c0d74e",build_id_type="BUILD_ID_BINARY_HASH",end_time_unix_nano=1721306050081621681u,file_offset=0u,filename="",frame_type="kernel",location="__x64_sys_epoll_wait",memory_limit=0u,memory_start=0u,stack_trace_id="UaO9bysJnAYXFYobSdHXqg",start_time_unix_nano=1721306050081621681u,value=1i 1721306050081621681
profiles,address=0xe194b2,host.name=Hugin,profile_id=618098d29a6cefd6a4c0ea806880c2a8,sample=2,sample_name=cpu,sample_type=samples,sample_type_unit=count,sample_unit=nanoseconds build_id="cfc3dc7d1638c1284a6b62d4b5c0d74e",build_id_type="BUILD_ID_BINARY_HASH",end_time_unix_nano=1721306050081621681u,file_offset=0u,filename="",frame_type="kernel",location="do_syscall_64",memory_limit=0u,memory_start=0u,stack_trace_id="UaO9bysJnAYXFYobSdHXqg",start_time_unix_nano=1721306050081621681u,value=1i 1721306050081621681
profiles,address=0x100012e,host.name=Hugin,profile_id=618098d29a6cefd6a4c0ea806880c2a8,sample=2,sample_name=cpu,sample_type=samples,sample_type_unit=count,sample_unit=nanoseconds build_id="cfc3dc7d1638c1284a6b62d4b5c0d74e",build_id_type="BUILD_ID_BINARY_HASH",end_time_unix_nano=1721306050081621681u,file_offset=0u,filename="",frame_type="kernel",location="entry_SYSCALL_64_after_hwframe",memory_limit=0u,memory_start=0u,stack_trace_id="UaO9bysJnAYXFYobSdHXqg",start_time_unix_nano=1721306050081621681u,value=1i 1721306050081621681
profiles,address=0x1164e1,host.name=Hugin,profile_id=618098d29a6cefd6a4c0ea806880c2a8,sample=2,sample_name=cpu,sample_type=samples,sample_type_unit=count,sample_unit=nanoseconds build_id="982ed6c7a77f99f0ae746be0187953bf",build_id_type="BUILD_ID_BINARY_HASH",end_time_unix_nano=1721306050081621681u,file_offset=147456u,filename="libc.so.6",frame_type="native",location="",memory_limit=1638400u,memory_start=147456u,stack_trace_id="UaO9bysJnAYXFYobSdHXqg",start_time_unix_nano=1721306050081621681u,value=1i 1721306050081621681
profiles,address=0x70604a0,host.name=Hugin,profile_id=618098d29a6cefd6a4c0ea806880c2a8,sample=2,sample_name=cpu,sample_type=samples,sample_type_unit=count,sample_unit=nanoseconds build_id="fab9b8c848218405738c11a7ec4982e9",build_id_type="BUILD_ID_BINARY_HASH",end_time_unix_nano=1721306050081621681u,file_offset=18694144u,filename="chromium",frame_type="native",location="",memory_limit=250413056u,memory_start=18698240u,stack_trace_id="UaO9bysJnAYXFYobSdHXqg",start_time_unix_nano=1721306050081621681u,value=1i 1721306050081621681

View file

@ -0,0 +1,699 @@
{
"resourceProfiles": [
{
"resource": {
"attributes": [
{
"key": "profiling.agent.start_time",
"value": {
"stringValue": "1721306026320"
}
},
{
"key": "profiling.agent.env_https_proxy",
"value": {
"stringValue": ""
}
},
{
"key": "profiling.agent.config.tracers",
"value": {
"stringValue": "all"
}
},
{
"key": "profiling.agent.config.ca_address",
"value": {
"stringValue": "127.0.0.1:11000"
}
},
{
"key": "host:cpu/cache/L2-kbytes",
"value": {
"stringValue": "512"
}
},
{
"key": "profiling.host.kernel_version",
"value": {
"stringValue": "6.9.9-arch1-1"
}
},
{
"key": "profiling.agent.config.verbose",
"value": {
"stringValue": "false"
}
},
{
"key": "host:cpu/vendor/socketIDs",
"value": {
"stringValue": "0"
}
},
{
"key": "host:cpu/cores-per-socket",
"value": {
"stringValue": "8"
}
},
{
"key": "profiling.host.sysctl.kernel.unprivileged_bpf_disabled",
"value": {
"stringValue": "2"
}
},
{
"key": "profiling.agent.revision",
"value": {
"stringValue": "main-172652de"
}
},
{
"key": "host:cpu/clock/scaling-governor",
"value": {
"stringValue": "schedutil"
}
},
{
"key": "host:cpu/cache/L3-kbytes",
"value": {
"stringValue": "16384"
}
},
{
"key": "host:cpu/clock/min-mhz",
"value": {
"stringValue": "2200"
}
},
{
"key": "host:cpu/clock/scaling-cur-freq-mhz",
"value": {
"stringValue": "2226"
}
},
{
"key": "host:cpu/cpus",
"value": {
"stringValue": "16"
}
},
{
"key": "host:cpu/clock/min-mhz/socketIDs",
"value": {
"stringValue": "0"
}
},
{
"key": "host:cpu/cache/L2-kbytes/socketIDs",
"value": {
"stringValue": "0"
}
},
{
"key": "profiling.host.machine",
"value": {
"stringValue": "x86_64"
}
},
{
"key": "profiling.agent.version",
"value": {
"stringValue": "v0.0.0"
}
},
{
"key": "host:cpu/clock/max-mhz/socketIDs",
"value": {
"stringValue": "0"
}
},
{
"key": "host:cpu/flags",
"value": {
"stringValue": "3dnowprefetch,abm,adx,aes,aperfmperf,apic,arat,avic,avx,avx2,bmi1,bmi2,bpext,cat_l3,cdp_l3,clflush,clflushopt,clwb,clzero,cmov,cmp_legacy,constant_tsc,cpb,cpuid,cqm,cqm_llc,cqm_mbm_local,cqm_mbm_total,cqm_occup_llc,cr8_legacy,cx16,cx8,de,decodeassists,extapic,extd_apicid,f16c,flushbyasid,fma,fpu,fsgsbase,fxsr,fxsr_opt,ht,hw_pstate,ibpb,ibs,irperf,lahf_lm,lbrv,lm,mba,mca,mce,misalignsse,mmx,mmxext,monitor,movbe,msr,mtrr,mwaitx,nonstop_tsc,nopl,npt,nrip_save,nx,osvw,overflow_recov,pae,pat,pausefilter,pclmulqdq,pdpe1gb,perfctr_core,perfctr_llc,perfctr_nb,pfthreshold,pge,pni,popcnt,pse,pse36,rapl,rdpid,rdpru,rdrand,rdseed,rdt_a,rdtscp,rep_good,sep,sev,sev_es,sha_ni,skinit,smap,smca,smep,ssbd,sse,sse2,sse4_1,sse4_2,sse4a,ssse3,stibp,succor,svm,svm_lock,syscall,tce,topoext,tsc,tsc_scale,umip,v_spec_ctrl,v_vmsave_vmload,vgif,vmcb_clean,vme,vmmcall,wbnoinvd,wdt,x2apic,xgetbv1,xsave,xsavec,xsaveerptr,xsaveopt,xtopology"
}
},
{
"key": "profiling.agent.config.max_elements_per_interval",
"value": {
"stringValue": "1600"
}
},
{
"key": "host:cpu/model/socketIDs",
"value": {
"stringValue": "0"
}
},
{
"key": "host:cpu/clock/scaling-governor/socketIDs",
"value": {
"stringValue": "0"
}
},
{
"key": "profiling.agent.config.known_traces_entries",
"value": {
"stringValue": "65536"
}
},
{
"key": "profiling.agent.config.probabilistic_threshold",
"value": {
"stringValue": "100"
}
},
{
"key": "host:cpu/cache/L1i-kbytes/socketIDs",
"value": {
"stringValue": "0"
}
},
{
"key": "host:cpu/stepping",
"value": {
"stringValue": "0"
}
},
{
"key": "profiling.agent.config.probabilistic_interval",
"value": {
"stringValue": "1m0s"
}
},
{
"key": "profiling.agent.config.map_scale_factor",
"value": {
"stringValue": "0"
}
},
{
"key": "host:cpu/cpus/socketIDs",
"value": {
"stringValue": "0"
}
},
{
"key": "host:cpu/bugs/socketIDs",
"value": {
"stringValue": "0"
}
},
{
"key": "host:cpu/clock/scaling-driver/socketIDs",
"value": {
"stringValue": "0"
}
},
{
"key": "host:cpu/cache/L3-kbytes/socketIDs",
"value": {
"stringValue": "0"
}
},
{
"key": "host:cpu/cores-per-socket/socketIDs",
"value": {
"stringValue": "0"
}
},
{
"key": "host:cpu/clock/scaling-cur-freq-mhz/socketIDs",
"value": {
"stringValue": "0"
}
},
{
"key": "profiling.host.name",
"value": {
"stringValue": "Hugin"
}
},
{
"key": "host:cpu/clock/max-mhz",
"value": {
"stringValue": "4426"
}
},
{
"key": "profiling.host.sysctl.net.core.bpf_jit_enable",
"value": {
"stringValue": "1"
}
},
{
"key": "profiling.agent.config.bpf_log_size",
"value": {
"stringValue": "65536"
}
},
{
"key": "profiling.agent.config.present_cpu_cores",
"value": {
"stringValue": "16"
}
},
{
"key": "profiling.agent.build_timestamp",
"value": {
"stringValue": "1721228980"
}
},
{
"key": "profiling.host.kernel_proc_version",
"value": {
"stringValue": "Linux version 6.9.9-arch1-1 (linux@archlinux) (gcc (GCC) 14.1.1 20240522, GNU ld (GNU Binutils) 2.42.0) #1 SMP PREEMPT_DYNAMIC Fri, 12 Jul 2024 00:06:53 +0000\n"
}
},
{
"key": "host:cpu/threads-per-core/socketIDs",
"value": {
"stringValue": "0"
}
},
{
"key": "host:cpu/cache/L1d-kbytes/socketIDs",
"value": {
"stringValue": "0"
}
},
{
"key": "profiling.agent.config.file",
"value": {
"stringValue": "/etc/otel/profiling-agent/agent.conf"
}
},
{
"key": "profiling.agent.config.disable_tls",
"value": {
"stringValue": "true"
}
},
{
"key": "profiling.host.sysctl.kernel.bpf_stats_enabled",
"value": {
"stringValue": "0"
}
},
{
"key": "profiling.agent.config.tags",
"value": {
"stringValue": ""
}
},
{
"key": "host:cpu/clock/scaling-driver",
"value": {
"stringValue": "acpi-cpufreq"
}
},
{
"key": "host:cpu/online/socketIDs",
"value": {
"stringValue": "0"
}
},
{
"key": "host:cpu/cache/L1d-kbytes",
"value": {
"stringValue": "32"
}
},
{
"key": "profiling.agent.config.bpf_log_level",
"value": {
"stringValue": "0"
}
},
{
"key": "host:cpu/model-name/socketIDs",
"value": {
"stringValue": "0"
}
},
{
"key": "host.arch",
"value": {
"stringValue": "amd64"
}
},
{
"key": "host:cpu/bugs",
"value": {
"stringValue": "retbleed,smt_rsb,spec_store_bypass,spectre_v1,spectre_v2,srso,sysret_ss_attrs"
}
},
{
"key": "host:cpu/threads-per-core",
"value": {
"stringValue": "2"
}
},
{
"key": "host:cpu/online",
"value": {
"stringValue": "0-15"
}
},
{
"key": "host:cpu/stepping/socketIDs",
"value": {
"stringValue": "0"
}
},
{
"key": "host:cpu/model-name",
"value": {
"stringValue": "AMD Ryzen 7 3700X 8-Core Processor"
}
},
{
"key": "profiling.host.ip",
"value": {
"stringValue": "127.0.0.1"
}
},
{
"key": "profiling.agent.config.cache_directory",
"value": {
"stringValue": "/var/cache/otel/profiling-agent"
}
},
{
"key": "profiling.agent.config.no_kernel_version_check",
"value": {
"stringValue": "false"
}
},
{
"key": "host:cpu/model",
"value": {
"stringValue": "113"
}
},
{
"key": "host:cpu/vendor",
"value": {
"stringValue": "AuthenticAMD"
}
},
{
"key": "host:cpu/flags/socketIDs",
"value": {
"stringValue": "0"
}
},
{
"key": "host:cpu/cache/L1i-kbytes",
"value": {
"stringValue": "32"
}
},
{
"key": "profiling.project.id",
"value": {
"stringValue": "1"
}
},
{
"key": "host.id",
"value": {
"stringValue": "1693958027216639598"
}
},
{
"key": "host.ip",
"value": {
"stringValue": "127.0.0.1"
}
},
{
"key": "host.name",
"value": {
"stringValue": "Hugin"
}
},
{
"key": "service.version",
"value": {
"stringValue": ""
}
},
{
"key": "os.kernel",
"value": {
"stringValue": "6.9.9-arch1-1"
}
}
]
},
"scopeProfiles": [
{
"scope": {},
"profiles": [
{
"profileId": "YYCY0pps79akwOqAaIDCqA==",
"startTimeUnixNano": "1721306050081621681",
"endTimeUnixNano": "1721306050081621681",
"profile": {
"sampleType": [
{
"type": "1",
"unit": "2"
}
],
"sample": [
{
"locationsLength": "1",
"stacktraceIdIndex": 5,
"value": [
"1"
],
"attributes": [
"0"
],
"timestampsUnixNano": [
"1721306048731622020"
]
},
{
"locationsStartIndex": "1",
"locationsLength": "4",
"stacktraceIdIndex": 9,
"value": [
"1"
],
"attributes": [
"1"
],
"timestampsUnixNano": [
"1721306049831718725"
]
},
{
"locationsStartIndex": "5",
"locationsLength": "6",
"stacktraceIdIndex": 12,
"value": [
"1"
],
"attributes": [
"2"
],
"timestampsUnixNano": [
"1721306050081621681"
]
}
],
"mapping": [
{
"memoryStart": "18698240",
"memoryLimit": "250413056",
"fileOffset": "18694144",
"filename": "7",
"buildId": "8",
"buildIdKind": "BUILD_ID_BINARY_HASH"
},
{
"memoryStart": "15638528",
"memoryLimit": "47255552",
"fileOffset": "15638528",
"filename": "10",
"buildId": "11",
"buildIdKind": "BUILD_ID_BINARY_HASH"
},
{
"buildId": "14",
"buildIdKind": "BUILD_ID_BINARY_HASH"
},
{
"memoryStart": "147456",
"memoryLimit": "1638400",
"fileOffset": "147456",
"filename": "15",
"buildId": "16",
"buildIdKind": "BUILD_ID_BINARY_HASH"
}
],
"location": [
{
"address": "95210353",
"typeIndex": 6
},
{
"mappingIndex": "1",
"address": "15945263",
"typeIndex": 6
},
{
"mappingIndex": "1",
"address": "15952400",
"typeIndex": 6
},
{
"mappingIndex": "1",
"address": "15953899",
"typeIndex": 6
},
{
"mappingIndex": "1",
"address": "16148175",
"typeIndex": 6
},
{
"mappingIndex": "2",
"address": "4770577",
"line": [
{
"functionIndex": "1"
}
],
"typeIndex": 13
},
{
"mappingIndex": "2",
"address": "4773632",
"line": [
{
"functionIndex": "2"
}
],
"typeIndex": 13
},
{
"mappingIndex": "2",
"address": "14783666",
"line": [
{
"functionIndex": "3"
}
],
"typeIndex": 13
},
{
"mappingIndex": "2",
"address": "16777518",
"line": [
{
"functionIndex": "4"
}
],
"typeIndex": 13
},
{
"mappingIndex": "3",
"address": "1139937",
"typeIndex": 6
},
{
"address": "117834912",
"typeIndex": 6
}
],
"locationIndices": [
"0",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"10"
],
"function": [
{},
{
"name": "20"
},
{
"name": "17"
},
{
"name": "18"
},
{
"name": "19"
}
],
"attributeTable": [
{
"key": "thread.name",
"value": {
"stringValue": "chromium"
}
},
{
"key": "thread.name",
"value": {
"stringValue": "dockerd"
}
},
{
"key": "thread.name",
"value": {
"stringValue": "ThreadPoolServi"
}
}
],
"stringTable": [
"",
"samples",
"count",
"cpu",
"nanoseconds",
"hYmAzQVF8vy8MWbzsKpQNw",
"native",
"chromium",
"fab9b8c848218405738c11a7ec4982e9",
"4N3KEcGylb5Qoi2905c1ZA",
"dockerd",
"7dab4a2e0005d025e75cc72191f8d6bf",
"UaO9bysJnAYXFYobSdHXqg",
"kernel",
"cfc3dc7d1638c1284a6b62d4b5c0d74e",
"libc.so.6",
"982ed6c7a77f99f0ae746be0187953bf",
"__x64_sys_epoll_wait",
"do_syscall_64",
"entry_SYSCALL_64_after_hwframe",
"do_epoll_wait"
],
"timeNanos": "1721306050081621681",
"periodType": {
"type": "3",
"unit": "4"
},
"period": "50000000"
}
}
]
}
]
}
]
}

View file

@ -0,0 +1 @@
[[inputs.opentelemetry]]

View file

@ -0,0 +1,57 @@
package opentelemetry
import (
"context"
"fmt"
"time"
"github.com/influxdata/influxdb-observability/common"
"github.com/influxdata/influxdb-observability/otel2influx"
"github.com/influxdata/telegraf"
)
// Compile-time assertions that writeToAccumulator implements both the
// single-point and the batched writer interfaces expected by otel2influx.
var (
	_ otel2influx.InfluxWriter = (*writeToAccumulator)(nil)
	_ otel2influx.InfluxWriterBatch = (*writeToAccumulator)(nil)
)
// writeToAccumulator adapts a telegraf.Accumulator to the otel2influx writer
// interfaces, allowing converted OpenTelemetry data to be fed directly into
// the Telegraf metric pipeline.
type writeToAccumulator struct {
	// accumulator receives each converted data point as it is enqueued.
	accumulator telegraf.Accumulator
}
// NewBatch returns a batch writer for telemetry data. Writes to the
// accumulator are stateless, so the receiver itself serves as the batch.
func (w *writeToAccumulator) NewBatch() otel2influx.InfluxWriterBatch {
	return w
}
// EnqueuePoint forwards a single telemetry data point to the underlying
// telegraf.Accumulator, dispatching on the Influx value type so the point is
// recorded with matching metric semantics (untyped, gauge, counter, histogram
// or summary). An error is returned for an unrecognized value type.
func (w *writeToAccumulator) EnqueuePoint(
	_ context.Context,
	measurement string,
	tags map[string]string,
	fields map[string]interface{},
	ts time.Time,
	vType common.InfluxMetricValueType,
) error {
	// Pick the accumulator method that corresponds to the metric value type.
	var add func(string, map[string]interface{}, map[string]string, ...time.Time)
	switch vType {
	case common.InfluxMetricValueTypeUntyped:
		add = w.accumulator.AddFields
	case common.InfluxMetricValueTypeGauge:
		add = w.accumulator.AddGauge
	case common.InfluxMetricValueTypeSum:
		add = w.accumulator.AddCounter
	case common.InfluxMetricValueTypeHistogram:
		add = w.accumulator.AddHistogram
	case common.InfluxMetricValueTypeSummary:
		add = w.accumulator.AddSummary
	default:
		return fmt.Errorf("unrecognized InfluxMetricValueType %q", vType)
	}
	add(measurement, fields, tags, ts)
	return nil
}
// WriteBatch implements otel2influx.InfluxWriterBatch as a no-op: each point
// is handed to the accumulator immediately in EnqueuePoint, so there is no
// buffered data left to flush here.
func (*writeToAccumulator) WriteBatch(context.Context) error {
	return nil
}