1
0
Fork 0

Adding upstream version 1.34.4.

Signed-off-by: Daniel Baumann <daniel@debian.org>
This commit is contained in:
Daniel Baumann 2025-05-24 07:26:29 +02:00
parent e393c3af3f
commit 4978089aab
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
4963 changed files with 677545 additions and 0 deletions

36
internal/choice/choice.go Normal file
View file

@ -0,0 +1,36 @@
// Package choice provides basic functions for working with
// plugin options that must be one of several values.
package choice
import "fmt"
// Contains return true if the choice in the list of choices.
func Contains(choice string, choices []string) bool {
for _, item := range choices {
if item == choice {
return true
}
}
return false
}
// Check returns an error if a choice is not one of
// the available choices.
func Check(choice string, available []string) error {
if !Contains(choice, available) {
return fmt.Errorf("unknown choice %s", choice)
}
return nil
}
// CheckSlice returns an error if the choices is not a subset of
// available.
func CheckSlice(choices, available []string) error {
for _, choice := range choices {
err := Check(choice, available)
if err != nil {
return err
}
}
return nil
}

495
internal/content_coding.go Normal file
View file

@ -0,0 +1,495 @@
package internal
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"github.com/klauspost/compress/gzip"
"github.com/klauspost/compress/zlib"
"github.com/klauspost/compress/zstd"
"github.com/klauspost/pgzip"
)
const defaultMaxDecompressionSize int64 = 500 * 1024 * 1024 // 500MB
// DecodingOption provide methods to change the decoding from the standard
// configuration.
type DecodingOption func(*decoderConfig)
type decoderConfig struct {
maxDecompressionSize int64
}
func WithMaxDecompressionSize(maxDecompressionSize int64) DecodingOption {
return func(cfg *decoderConfig) {
cfg.maxDecompressionSize = maxDecompressionSize
}
}
type encoderConfig struct {
level int
}
// EncodingOption provide methods to change the encoding from the standard
// configuration.
type EncodingOption func(*encoderConfig)
func WithCompressionLevel(level int) EncodingOption {
return func(cfg *encoderConfig) {
cfg.level = level
}
}
// NewStreamContentDecoder returns a reader that will decode the stream
// according to the encoding type.
func NewStreamContentDecoder(encoding string, r io.Reader) (io.Reader, error) {
switch encoding {
case "gzip":
return NewGzipReader(r)
case "identity", "":
return r, nil
default:
return nil, errors.New("invalid value for content_encoding")
}
}
// GzipReader is similar to gzip.Reader but reads only a single gzip stream per read.
type GzipReader struct {
r io.Reader
z *pgzip.Reader
endOfStream bool
}
func NewGzipReader(r io.Reader) (io.Reader, error) {
// We need a read that implements ByteReader in order to line up the next
// stream.
br := bufio.NewReader(r)
// Reads the first gzip stream header.
z, err := pgzip.NewReader(br)
if err != nil {
return nil, err
}
// Prevent future calls to Read from reading the following gzip header.
z.Multistream(false)
return &GzipReader{r: br, z: z}, nil
}
func (r *GzipReader) Read(b []byte) (int, error) {
if r.endOfStream {
// Reads the next gzip header and prepares for the next stream.
err := r.z.Reset(r.r)
if err != nil {
return 0, err
}
r.z.Multistream(false)
r.endOfStream = false
}
n, err := r.z.Read(b)
// Since multistream is disabled, io.EOF indicates the end of the gzip
// sequence. On the next read we must read the next gzip header.
if errors.Is(err, io.EOF) {
r.endOfStream = true
return n, nil
}
return n, err
}
// NewContentEncoder returns a ContentEncoder for the encoding type.
func NewContentEncoder(encoding string, options ...EncodingOption) (ContentEncoder, error) {
switch encoding {
case "gzip":
return NewGzipEncoder(options...)
case "identity", "":
return NewIdentityEncoder(options...)
case "zlib":
return NewZlibEncoder(options...)
case "zstd":
return NewZstdEncoder(options...)
default:
return nil, errors.New("invalid value for content_encoding")
}
}
type AutoDecoder struct {
encoding string
gzip *GzipDecoder
identity *IdentityDecoder
}
func (a *AutoDecoder) SetEncoding(encoding string) {
a.encoding = encoding
}
func (a *AutoDecoder) Decode(data []byte) ([]byte, error) {
if a.encoding == "gzip" {
return a.gzip.Decode(data)
}
return a.identity.Decode(data)
}
func NewAutoContentDecoder(options ...DecodingOption) *AutoDecoder {
var a AutoDecoder
a.identity = NewIdentityDecoder(options...)
a.gzip = NewGzipDecoder(options...)
return &a
}
// NewContentDecoder returns a ContentDecoder for the encoding type.
func NewContentDecoder(encoding string, options ...DecodingOption) (ContentDecoder, error) {
switch encoding {
case "auto":
return NewAutoContentDecoder(options...), nil
case "gzip":
return NewGzipDecoder(options...), nil
case "identity", "":
return NewIdentityDecoder(options...), nil
case "zlib":
return NewZlibDecoder(options...), nil
case "zstd":
return NewZstdDecoder(options...)
default:
return nil, errors.New("invalid value for content_encoding")
}
}
// ContentEncoder applies a wrapper encoding to byte buffers.
type ContentEncoder interface {
Encode([]byte) ([]byte, error)
}
// GzipEncoder compresses the buffer using gzip at the default level.
type GzipEncoder struct {
pwriter *pgzip.Writer
writer *gzip.Writer
buf *bytes.Buffer
}
func NewGzipEncoder(options ...EncodingOption) (*GzipEncoder, error) {
cfg := encoderConfig{level: gzip.DefaultCompression}
for _, o := range options {
o(&cfg)
}
// Check if the compression level is supported
switch cfg.level {
case gzip.NoCompression, gzip.DefaultCompression, gzip.BestSpeed, gzip.BestCompression:
// Do nothing as those are valid levels
default:
return nil, errors.New("invalid compression level, only 0, 1 and 9 are supported")
}
var buf bytes.Buffer
pw, err := pgzip.NewWriterLevel(&buf, cfg.level)
if err != nil {
return nil, err
}
w, err := gzip.NewWriterLevel(&buf, cfg.level)
return &GzipEncoder{
pwriter: pw,
writer: w,
buf: &buf,
}, err
}
func (e *GzipEncoder) Encode(data []byte) ([]byte, error) {
// Parallel Gzip is only faster for larger data chunks. According to the
// project's documentation the trade-off size is at about 1MB, so we switch
// to parallel Gzip if the data is larger and run the built-in version
// otherwise.
if len(data) > 1024*1024 {
return e.encodeBig(data)
}
return e.encodeSmall(data)
}
func (e *GzipEncoder) encodeSmall(data []byte) ([]byte, error) {
e.buf.Reset()
e.writer.Reset(e.buf)
_, err := e.writer.Write(data)
if err != nil {
return nil, err
}
err = e.writer.Close()
if err != nil {
return nil, err
}
return e.buf.Bytes(), nil
}
func (e *GzipEncoder) encodeBig(data []byte) ([]byte, error) {
e.buf.Reset()
e.pwriter.Reset(e.buf)
_, err := e.pwriter.Write(data)
if err != nil {
return nil, err
}
err = e.pwriter.Close()
if err != nil {
return nil, err
}
return e.buf.Bytes(), nil
}
type ZlibEncoder struct {
writer *zlib.Writer
buf *bytes.Buffer
}
func NewZlibEncoder(options ...EncodingOption) (*ZlibEncoder, error) {
cfg := encoderConfig{level: zlib.DefaultCompression}
for _, o := range options {
o(&cfg)
}
switch cfg.level {
case zlib.NoCompression, zlib.DefaultCompression, zlib.BestSpeed, zlib.BestCompression:
// Do nothing as those are valid levels
default:
return nil, errors.New("invalid compression level, only 0, 1 and 9 are supported")
}
var buf bytes.Buffer
w, err := zlib.NewWriterLevel(&buf, cfg.level)
return &ZlibEncoder{
writer: w,
buf: &buf,
}, err
}
func (e *ZlibEncoder) Encode(data []byte) ([]byte, error) {
e.buf.Reset()
e.writer.Reset(e.buf)
_, err := e.writer.Write(data)
if err != nil {
return nil, err
}
err = e.writer.Close()
if err != nil {
return nil, err
}
return e.buf.Bytes(), nil
}
type ZstdEncoder struct {
encoder *zstd.Encoder
}
func NewZstdEncoder(options ...EncodingOption) (*ZstdEncoder, error) {
cfg := encoderConfig{level: 3}
for _, o := range options {
o(&cfg)
}
// Map the levels
var level zstd.EncoderLevel
switch cfg.level {
case 1:
level = zstd.SpeedFastest
case 3:
level = zstd.SpeedDefault
case 7:
level = zstd.SpeedBetterCompression
case 11:
level = zstd.SpeedBestCompression
default:
return nil, errors.New("invalid compression level, only 1, 3, 7 and 11 are supported")
}
e, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(level))
return &ZstdEncoder{
encoder: e,
}, err
}
func (e *ZstdEncoder) Encode(data []byte) ([]byte, error) {
return e.encoder.EncodeAll(data, make([]byte, 0, len(data))), nil
}
// IdentityEncoder is a null encoder that applies no transformation.
type IdentityEncoder struct{}
func NewIdentityEncoder(options ...EncodingOption) (*IdentityEncoder, error) {
if len(options) > 0 {
return nil, errors.New("identity encoder does not support options")
}
return &IdentityEncoder{}, nil
}
func (*IdentityEncoder) Encode(data []byte) ([]byte, error) {
return data, nil
}
// ContentDecoder removes a wrapper encoding from byte buffers.
type ContentDecoder interface {
SetEncoding(string)
Decode([]byte) ([]byte, error)
}
// GzipDecoder decompresses buffers with gzip compression.
type GzipDecoder struct {
preader *pgzip.Reader
reader *gzip.Reader
buf *bytes.Buffer
maxDecompressionSize int64
}
func NewGzipDecoder(options ...DecodingOption) *GzipDecoder {
cfg := decoderConfig{maxDecompressionSize: defaultMaxDecompressionSize}
for _, o := range options {
o(&cfg)
}
return &GzipDecoder{
preader: new(pgzip.Reader),
reader: new(gzip.Reader),
buf: new(bytes.Buffer),
maxDecompressionSize: cfg.maxDecompressionSize,
}
}
func (*GzipDecoder) SetEncoding(string) {}
func (d *GzipDecoder) Decode(data []byte) ([]byte, error) {
// Parallel Gzip is only faster for larger data chunks. According to the
// project's documentation the trade-off size is at about 1MB, so we switch
// to parallel Gzip if the data is larger and run the built-in version
// otherwise.
if len(data) > 1024*1024 {
return d.decodeBig(data)
}
return d.decodeSmall(data)
}
func (d *GzipDecoder) decodeSmall(data []byte) ([]byte, error) {
err := d.reader.Reset(bytes.NewBuffer(data))
if err != nil {
return nil, err
}
d.buf.Reset()
n, err := io.CopyN(d.buf, d.reader, d.maxDecompressionSize)
if err != nil && !errors.Is(err, io.EOF) {
return nil, err
} else if n == d.maxDecompressionSize {
return nil, fmt.Errorf("size of decoded data exceeds allowed size %d", d.maxDecompressionSize)
}
err = d.reader.Close()
if err != nil {
return nil, err
}
return d.buf.Bytes(), nil
}
func (d *GzipDecoder) decodeBig(data []byte) ([]byte, error) {
err := d.preader.Reset(bytes.NewBuffer(data))
if err != nil {
return nil, err
}
d.buf.Reset()
n, err := io.CopyN(d.buf, d.preader, d.maxDecompressionSize)
if err != nil && !errors.Is(err, io.EOF) {
return nil, err
} else if n == d.maxDecompressionSize {
return nil, fmt.Errorf("size of decoded data exceeds allowed size %d", d.maxDecompressionSize)
}
err = d.preader.Close()
if err != nil {
return nil, err
}
return d.buf.Bytes(), nil
}
type ZlibDecoder struct {
buf *bytes.Buffer
maxDecompressionSize int64
}
func NewZlibDecoder(options ...DecodingOption) *ZlibDecoder {
cfg := decoderConfig{maxDecompressionSize: defaultMaxDecompressionSize}
for _, o := range options {
o(&cfg)
}
return &ZlibDecoder{
buf: new(bytes.Buffer),
maxDecompressionSize: cfg.maxDecompressionSize,
}
}
func (*ZlibDecoder) SetEncoding(string) {}
func (d *ZlibDecoder) Decode(data []byte) ([]byte, error) {
d.buf.Reset()
b := bytes.NewBuffer(data)
r, err := zlib.NewReader(b)
if err != nil {
return nil, err
}
n, err := io.CopyN(d.buf, r, d.maxDecompressionSize)
if err != nil && !errors.Is(err, io.EOF) {
return nil, err
} else if n == d.maxDecompressionSize {
return nil, fmt.Errorf("size of decoded data exceeds allowed size %d", d.maxDecompressionSize)
}
err = r.Close()
if err != nil {
return nil, err
}
return d.buf.Bytes(), nil
}
type ZstdDecoder struct {
decoder *zstd.Decoder
}
func NewZstdDecoder(options ...DecodingOption) (*ZstdDecoder, error) {
cfg := decoderConfig{maxDecompressionSize: defaultMaxDecompressionSize}
for _, o := range options {
o(&cfg)
}
d, err := zstd.NewReader(nil, zstd.WithDecoderConcurrency(0), zstd.WithDecoderMaxWindow(uint64(cfg.maxDecompressionSize)))
return &ZstdDecoder{
decoder: d,
}, err
}
func (*ZstdDecoder) SetEncoding(string) {}
func (d *ZstdDecoder) Decode(data []byte) ([]byte, error) {
return d.decoder.DecodeAll(data, nil)
}
// IdentityDecoder is a null decoder that returns the input.
type IdentityDecoder struct {
}
func NewIdentityDecoder(_ ...DecodingOption) *IdentityDecoder {
return &IdentityDecoder{}
}
func (*IdentityDecoder) SetEncoding(string) {}
func (*IdentityDecoder) Decode(data []byte) ([]byte, error) {
return data, nil
}

View file

@ -0,0 +1,555 @@
package internal
import (
"bytes"
"fmt"
"io"
"strings"
"testing"
"github.com/stretchr/testify/require"
)
const maxDecompressionSize = 1024
func TestGzipEncodeDecode(t *testing.T) {
enc, err := NewGzipEncoder()
require.NoError(t, err)
dec := NewGzipDecoder(WithMaxDecompressionSize(maxDecompressionSize))
payload, err := enc.Encode([]byte("howdy"))
require.NoError(t, err)
actual, err := dec.Decode(payload)
require.NoError(t, err)
require.Equal(t, "howdy", string(actual))
}
func TestGzipReuse(t *testing.T) {
enc, err := NewGzipEncoder()
require.NoError(t, err)
dec := NewGzipDecoder(WithMaxDecompressionSize(maxDecompressionSize))
payload, err := enc.Encode([]byte("howdy"))
require.NoError(t, err)
actual, err := dec.Decode(payload)
require.NoError(t, err)
require.Equal(t, "howdy", string(actual))
payload, err = enc.Encode([]byte("doody"))
require.NoError(t, err)
actual, err = dec.Decode(payload)
require.NoError(t, err)
require.Equal(t, "doody", string(actual))
}
func TestZlibEncodeDecode(t *testing.T) {
enc, err := NewZlibEncoder()
require.NoError(t, err)
dec := NewZlibDecoder(WithMaxDecompressionSize(maxDecompressionSize))
payload, err := enc.Encode([]byte("howdy"))
require.NoError(t, err)
actual, err := dec.Decode(payload)
require.NoError(t, err)
require.Equal(t, "howdy", string(actual))
}
func TestZlibEncodeDecodeWithTooLargeMessage(t *testing.T) {
enc, err := NewZlibEncoder()
require.NoError(t, err)
dec := NewZlibDecoder(WithMaxDecompressionSize(3))
payload, err := enc.Encode([]byte("howdy"))
require.NoError(t, err)
_, err = dec.Decode(payload)
require.ErrorContains(t, err, "size of decoded data exceeds allowed size 3")
}
func TestZstdEncodeDecode(t *testing.T) {
enc, err := NewZstdEncoder()
require.NoError(t, err)
dec, err := NewZstdDecoder(WithMaxDecompressionSize(maxDecompressionSize))
require.NoError(t, err)
payload, err := enc.Encode([]byte("howdy"))
require.NoError(t, err)
actual, err := dec.Decode(payload)
require.NoError(t, err)
require.Equal(t, "howdy", string(actual))
}
func TestZstdReuse(t *testing.T) {
enc, err := NewZstdEncoder()
require.NoError(t, err)
dec, err := NewZstdDecoder(WithMaxDecompressionSize(maxDecompressionSize))
require.NoError(t, err)
payload, err := enc.Encode([]byte("howdy"))
require.NoError(t, err)
actual, err := dec.Decode(payload)
require.NoError(t, err)
require.Equal(t, "howdy", string(actual))
payload, err = enc.Encode([]byte("doody"))
require.NoError(t, err)
actual, err = dec.Decode(payload)
require.NoError(t, err)
require.Equal(t, "doody", string(actual))
}
func TestIdentityEncodeDecode(t *testing.T) {
dec := NewIdentityDecoder(WithMaxDecompressionSize(maxDecompressionSize))
enc, err := NewIdentityEncoder()
require.NoError(t, err)
payload, err := enc.Encode([]byte("howdy"))
require.NoError(t, err)
actual, err := dec.Decode(payload)
require.NoError(t, err)
require.Equal(t, "howdy", string(actual))
}
func TestStreamIdentityDecode(t *testing.T) {
var r bytes.Buffer
n, err := r.WriteString("howdy")
require.NoError(t, err)
require.Equal(t, 5, n)
dec, err := NewStreamContentDecoder("identity", &r)
require.NoError(t, err)
data, err := io.ReadAll(dec)
require.NoError(t, err)
require.Equal(t, []byte("howdy"), data)
}
func TestStreamGzipDecode(t *testing.T) {
enc, err := NewGzipEncoder()
require.NoError(t, err)
written, err := enc.Encode([]byte("howdy"))
require.NoError(t, err)
w := bytes.NewBuffer(written)
dec, err := NewStreamContentDecoder("gzip", w)
require.NoError(t, err)
b := make([]byte, 10)
n, err := dec.Read(b)
require.NoError(t, err)
require.Equal(t, 5, n)
require.Equal(t, []byte("howdy"), b[:n])
}
func TestCompressionLevel(t *testing.T) {
tests := []struct {
algorithm string
validLevels []int
errormsg string
}{
{
algorithm: "gzip",
validLevels: []int{0, 1, 9},
errormsg: "invalid compression level",
},
{
algorithm: "zlib",
validLevels: []int{0, 1, 9},
errormsg: "invalid compression level",
},
{
algorithm: "zstd",
validLevels: []int{1, 3, 7, 11},
errormsg: "invalid compression level",
},
{
algorithm: "identity",
errormsg: "does not support options",
},
}
for _, tt := range tests {
// Check default i.e. without specifying level
t.Run(tt.algorithm+" default", func(t *testing.T) {
enc, err := NewContentEncoder(tt.algorithm)
require.NoError(t, err)
require.NotNil(t, enc)
})
// Check invalid level
t.Run(tt.algorithm+" invalid", func(t *testing.T) {
_, err := NewContentEncoder(tt.algorithm, WithCompressionLevel(12))
require.ErrorContains(t, err, tt.errormsg)
})
// Check known levels 0..9
for level := 0; level < 10; level++ {
name := fmt.Sprintf("%s level %d", tt.algorithm, level)
t.Run(name, func(t *testing.T) {
var valid bool
for _, l := range tt.validLevels {
if l == level {
valid = true
break
}
}
enc, err := NewContentEncoder(tt.algorithm, WithCompressionLevel(level))
if valid {
require.NoError(t, err)
require.NotNil(t, enc)
} else {
require.ErrorContains(t, err, tt.errormsg)
}
})
}
}
}
func BenchmarkGzipEncode(b *testing.B) {
data := []byte(strings.Repeat("-howdy stranger-", 64))
dataLen := int64(len(data)) + 1
enc, err := NewGzipEncoder()
require.NoError(b, err)
dec := NewGzipDecoder(WithMaxDecompressionSize(dataLen))
payload, err := enc.Encode(data)
require.NoError(b, err)
actual, err := dec.Decode(payload)
require.NoError(b, err)
require.Equal(b, data, actual)
for n := 0; n < b.N; n++ {
_, err := enc.Encode(data)
require.NoError(b, err)
}
}
func BenchmarkGzipDecode(b *testing.B) {
data := []byte(strings.Repeat("-howdy stranger-", 64))
dataLen := int64(len(data)) + 1
enc, err := NewGzipEncoder()
require.NoError(b, err)
dec := NewGzipDecoder(WithMaxDecompressionSize(dataLen))
payload, err := enc.Encode(data)
require.NoError(b, err)
actual, err := dec.Decode(payload)
require.NoError(b, err)
require.Equal(b, data, actual)
for n := 0; n < b.N; n++ {
_, err = dec.Decode(payload)
require.NoError(b, err)
}
}
func BenchmarkGzipEncodeDecode(b *testing.B) {
data := []byte(strings.Repeat("-howdy stranger-", 64))
dataLen := int64(len(data)) + 1
enc, err := NewGzipEncoder()
require.NoError(b, err)
dec := NewGzipDecoder(WithMaxDecompressionSize(dataLen))
payload, err := enc.Encode(data)
require.NoError(b, err)
actual, err := dec.Decode(payload)
require.NoError(b, err)
require.Equal(b, data, actual)
for n := 0; n < b.N; n++ {
payload, err := enc.Encode(data)
require.NoError(b, err)
_, err = dec.Decode(payload)
require.NoError(b, err)
}
}
func BenchmarkGzipEncodeBig(b *testing.B) {
data := []byte(strings.Repeat("-howdy stranger-", 1024*1024))
dataLen := int64(len(data)) + 1
enc, err := NewGzipEncoder()
require.NoError(b, err)
dec := NewGzipDecoder(WithMaxDecompressionSize(dataLen))
payload, err := enc.Encode(data)
require.NoError(b, err)
actual, err := dec.Decode(payload)
require.NoError(b, err)
require.Equal(b, data, actual)
for n := 0; n < b.N; n++ {
_, err := enc.Encode(data)
require.NoError(b, err)
}
}
func BenchmarkGzipDecodeBig(b *testing.B) {
data := []byte(strings.Repeat("-howdy stranger-", 1024*1024))
dataLen := int64(len(data)) + 1
enc, err := NewGzipEncoder()
require.NoError(b, err)
dec := NewGzipDecoder(WithMaxDecompressionSize(dataLen))
payload, err := enc.Encode(data)
require.NoError(b, err)
actual, err := dec.Decode(payload)
require.NoError(b, err)
require.Equal(b, data, actual)
for n := 0; n < b.N; n++ {
_, err = dec.Decode(payload)
require.NoError(b, err)
}
}
func BenchmarkGzipEncodeDecodeBig(b *testing.B) {
data := []byte(strings.Repeat("-howdy stranger-", 1024*1024))
dataLen := int64(len(data)) + 1
enc, err := NewGzipEncoder()
require.NoError(b, err)
dec := NewGzipDecoder(WithMaxDecompressionSize(dataLen))
payload, err := enc.Encode(data)
require.NoError(b, err)
actual, err := dec.Decode(payload)
require.NoError(b, err)
require.Equal(b, data, actual)
for n := 0; n < b.N; n++ {
payload, err := enc.Encode(data)
require.NoError(b, err)
_, err = dec.Decode(payload)
require.NoError(b, err)
}
}
func BenchmarkZstdEncode(b *testing.B) {
data := []byte(strings.Repeat("-howdy stranger-", 64))
dataLen := int64(len(data)) + 1
enc, err := NewZstdEncoder()
require.NoError(b, err)
dec, err := NewZstdDecoder(WithMaxDecompressionSize(dataLen))
require.NoError(b, err)
payload, err := enc.Encode(data)
require.NoError(b, err)
actual, err := dec.Decode(payload)
require.NoError(b, err)
require.Equal(b, data, actual)
for n := 0; n < b.N; n++ {
_, err := enc.Encode(data)
require.NoError(b, err)
}
}
func BenchmarkZstdDecode(b *testing.B) {
data := []byte(strings.Repeat("-howdy stranger-", 64))
dataLen := int64(len(data)) + 1
enc, err := NewZstdEncoder()
require.NoError(b, err)
dec, err := NewZstdDecoder(WithMaxDecompressionSize(dataLen))
require.NoError(b, err)
payload, err := enc.Encode(data)
require.NoError(b, err)
actual, err := dec.Decode(payload)
require.NoError(b, err)
require.Equal(b, data, actual)
for n := 0; n < b.N; n++ {
_, err = dec.Decode(payload)
require.NoError(b, err)
}
}
func BenchmarkZstdEncodeDecode(b *testing.B) {
data := []byte(strings.Repeat("-howdy stranger-", 64))
dataLen := int64(len(data)) + 1
enc, err := NewZstdEncoder()
require.NoError(b, err)
dec, err := NewZstdDecoder(WithMaxDecompressionSize(dataLen))
require.NoError(b, err)
payload, err := enc.Encode(data)
require.NoError(b, err)
actual, err := dec.Decode(payload)
require.NoError(b, err)
require.Equal(b, data, actual)
for n := 0; n < b.N; n++ {
payload, err := enc.Encode(data)
require.NoError(b, err)
_, err = dec.Decode(payload)
require.NoError(b, err)
}
}
func BenchmarkZstdEncodeBig(b *testing.B) {
data := []byte(strings.Repeat("-howdy stranger-", 1024*1024))
dataLen := int64(len(data)) + 1
enc, err := NewZstdEncoder()
require.NoError(b, err)
dec, err := NewZstdDecoder(WithMaxDecompressionSize(dataLen))
require.NoError(b, err)
payload, err := enc.Encode(data)
require.NoError(b, err)
actual, err := dec.Decode(payload)
require.NoError(b, err)
require.Equal(b, data, actual)
for n := 0; n < b.N; n++ {
_, err := enc.Encode(data)
require.NoError(b, err)
}
}
func BenchmarkZstdDecodeBig(b *testing.B) {
data := []byte(strings.Repeat("-howdy stranger-", 1024*1024))
dataLen := int64(len(data)) + 1
enc, err := NewZstdEncoder()
require.NoError(b, err)
dec, err := NewZstdDecoder(WithMaxDecompressionSize(dataLen))
require.NoError(b, err)
payload, err := enc.Encode(data)
require.NoError(b, err)
actual, err := dec.Decode(payload)
require.NoError(b, err)
require.Equal(b, data, actual)
for n := 0; n < b.N; n++ {
_, err = dec.Decode(payload)
require.NoError(b, err)
}
}
func BenchmarkZstdEncodeDecodeBig(b *testing.B) {
data := []byte(strings.Repeat("-howdy stranger-", 1024*1024))
dataLen := int64(len(data)) + 1
enc, err := NewZstdEncoder()
require.NoError(b, err)
dec, err := NewZstdDecoder(WithMaxDecompressionSize(dataLen))
require.NoError(b, err)
payload, err := enc.Encode(data)
require.NoError(b, err)
actual, err := dec.Decode(payload)
require.NoError(b, err)
require.Equal(b, data, actual)
for n := 0; n < b.N; n++ {
payload, err := enc.Encode(data)
require.NoError(b, err)
_, err = dec.Decode(payload)
require.NoError(b, err)
}
}
func BenchmarkZlibEncode(b *testing.B) {
data := []byte(strings.Repeat("-howdy stranger-", 64))
dataLen := int64(len(data)) + 1
enc, err := NewZlibEncoder()
require.NoError(b, err)
dec := NewZlibDecoder(WithMaxDecompressionSize(dataLen))
payload, err := enc.Encode(data)
require.NoError(b, err)
actual, err := dec.Decode(payload)
require.NoError(b, err)
require.Equal(b, data, actual)
for n := 0; n < b.N; n++ {
_, err := enc.Encode(data)
require.NoError(b, err)
}
}
func BenchmarkZlibDecode(b *testing.B) {
data := []byte(strings.Repeat("-howdy stranger-", 64))
dataLen := int64(len(data)) + 1
enc, err := NewZlibEncoder()
require.NoError(b, err)
dec := NewZlibDecoder(WithMaxDecompressionSize(dataLen))
payload, err := enc.Encode(data)
require.NoError(b, err)
actual, err := dec.Decode(payload)
require.NoError(b, err)
require.Equal(b, data, actual)
for n := 0; n < b.N; n++ {
_, err = dec.Decode(payload)
require.NoError(b, err)
}
}
func BenchmarkZlibEncodeDecode(b *testing.B) {
data := []byte(strings.Repeat("-howdy stranger-", 64))
dataLen := int64(len(data)) + 1
enc, err := NewZlibEncoder()
require.NoError(b, err)
dec := NewZlibDecoder(WithMaxDecompressionSize(dataLen))
payload, err := enc.Encode(data)
require.NoError(b, err)
actual, err := dec.Decode(payload)
require.NoError(b, err)
require.Equal(b, data, actual)
for n := 0; n < b.N; n++ {
payload, err := enc.Encode(data)
require.NoError(b, err)
_, err = dec.Decode(payload)
require.NoError(b, err)
}
}
func BenchmarkIdentityEncodeDecode(b *testing.B) {
data := []byte(strings.Repeat("-howdy stranger-", 64))
dataLen := int64(len(data)) + 1
dec := NewIdentityDecoder(WithMaxDecompressionSize(dataLen))
enc, err := NewIdentityEncoder()
require.NoError(b, err)
payload, err := enc.Encode(data)
require.NoError(b, err)
actual, err := dec.Decode(payload)
require.NoError(b, err)
require.Equal(b, data, actual)
for n := 0; n < b.N; n++ {
payload, err := enc.Encode(data)
require.NoError(b, err)
_, err = dec.Decode(payload)
require.NoError(b, err)
}
}

View file

@ -0,0 +1,5 @@
//go:build !custom
package internal
const Customized = ""

View file

@ -0,0 +1,5 @@
//go:build custom
package internal
const Customized = " (customized)"

33
internal/docker/docker.go Normal file
View file

@ -0,0 +1,33 @@
package docker
import "strings"
// ParseImage Adapts some of the logic from the actual Docker library's image parsing routines:
// https://github.com/docker/distribution/blob/release/2.7/reference/normalize.go
func ParseImage(image string) (imageName, imageVersion string) {
domain := ""
remainder := ""
i := strings.IndexRune(image, '/')
if i == -1 || (!strings.ContainsAny(image[:i], ".:") && image[:i] != "localhost") {
remainder = image
} else {
domain, remainder = image[:i], image[i+1:]
}
imageVersion = "unknown"
i = strings.LastIndex(remainder, ":")
if i > -1 {
imageVersion = remainder[i+1:]
imageName = remainder[:i]
} else {
imageName = remainder
}
if domain != "" {
imageName = domain + "/" + imageName
}
return imageName, imageVersion
}

View file

@ -0,0 +1,60 @@
package docker_test
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf/internal/docker"
)
func TestParseImage(t *testing.T) {
tests := []struct {
image string
parsedName string
parsedVersion string
}{
{
image: "postgres",
parsedName: "postgres",
parsedVersion: "unknown",
},
{
image: "postgres:latest",
parsedName: "postgres",
parsedVersion: "latest",
},
{
image: "coreos/etcd",
parsedName: "coreos/etcd",
parsedVersion: "unknown",
},
{
image: "coreos/etcd:latest",
parsedName: "coreos/etcd",
parsedVersion: "latest",
},
{
image: "quay.io/postgres",
parsedName: "quay.io/postgres",
parsedVersion: "unknown",
},
{
image: "quay.io:4443/coreos/etcd",
parsedName: "quay.io:4443/coreos/etcd",
parsedVersion: "unknown",
},
{
image: "quay.io:4443/coreos/etcd:latest",
parsedName: "quay.io:4443/coreos/etcd",
parsedVersion: "latest",
},
}
for _, tt := range tests {
t.Run("parse name "+tt.image, func(t *testing.T) {
imageName, imageVersion := docker.ParseImage(tt.image)
require.Equal(t, tt.parsedName, imageName)
require.Equal(t, tt.parsedVersion, imageVersion)
})
}
}

19
internal/env.go Normal file
View file

@ -0,0 +1,19 @@
package internal
import "os"
// GetProcPath returns the path stored in HOST_PROC env variable, or /proc if HOST_PROC has not been set.
func GetProcPath() string {
if hostProc := os.Getenv("HOST_PROC"); hostProc != "" {
return hostProc
}
return "/proc"
}
// GetSysPath returns the path stored in HOST_SYS env variable, or /sys if HOST_SYS has not been set.
func GetSysPath() string {
if hostSys := os.Getenv("HOST_SYS"); hostSys != "" {
return hostSys
}
return "/sys"
}

63
internal/errors.go Normal file
View file

@ -0,0 +1,63 @@
package internal
import "errors"
var (
ErrNotConnected = errors.New("not connected")
ErrSerialization = errors.New("serialization of metric(s) failed")
ErrSizeLimitReached = errors.New("size limit reached")
)
// StartupError indicates an error that occurred during startup of a plugin
// e.g. due to connectivity issues or resources being not yet available.
// In case the 'Retry' flag is set, the startup of the plugin might be retried
// depending on the configured startup-error-behavior. The 'RemovePlugin'
// flag denotes if the agent should remove the plugin from further processing.
type StartupError struct {
Err error
Retry bool
Partial bool
}
func (e *StartupError) Error() string {
return e.Err.Error()
}
func (e *StartupError) Unwrap() error {
return e.Err
}
// FatalError indicates a not-recoverable error in the plugin. The corresponding
// plugin should be remove by the agent stopping any further processing for that
// plugin instance.
type FatalError struct {
Err error
}
func (e *FatalError) Error() string {
return e.Err.Error()
}
func (e *FatalError) Unwrap() error {
return e.Err
}
// PartialWriteError indicate that only a subset of the metrics were written
// successfully (i.e. accepted). The rejected metrics should be removed from
// the buffer without being successfully written. Please note: the metrics
// are specified as indices into the batch to be able to reference tracking
// metrics correctly.
type PartialWriteError struct {
Err error
MetricsAccept []int
MetricsReject []int
MetricsRejectErrors []error
}
func (e *PartialWriteError) Error() string {
return e.Err.Error()
}
func (e *PartialWriteError) Unwrap() error {
return e.Err
}

44
internal/exec.go Normal file
View file

@ -0,0 +1,44 @@
package internal
import (
"bytes"
"os/exec"
"time"
)
// CombinedOutputTimeout runs the given command with the given timeout and
// returns the combined output of stdout and stderr.
// If the command times out, it attempts to kill the process.
func CombinedOutputTimeout(c *exec.Cmd, timeout time.Duration) ([]byte, error) {
var b bytes.Buffer
c.Stdout = &b
c.Stderr = &b
if err := c.Start(); err != nil {
return nil, err
}
err := WaitTimeout(c, timeout)
return b.Bytes(), err
}
// StdOutputTimeout runs the given command with the given timeout and
// returns the output of stdout.
// If the command times out, it attempts to kill the process.
func StdOutputTimeout(c *exec.Cmd, timeout time.Duration) ([]byte, error) {
var b bytes.Buffer
c.Stdout = &b
c.Stderr = nil
if err := c.Start(); err != nil {
return nil, err
}
err := WaitTimeout(c, timeout)
return b.Bytes(), err
}
// RunTimeout runs the given command with the given timeout.
// If the command times out, it attempts to kill the process.
func RunTimeout(c *exec.Cmd, timeout time.Duration) error {
if err := c.Start(); err != nil {
return err
}
return WaitTimeout(c, timeout)
}

66
internal/exec_unix.go Normal file
View file

@ -0,0 +1,66 @@
//go:build !windows
package internal
import (
"log"
"os/exec"
"syscall"
"time"
)
// KillGrace is the amount of time we allow a process to shutdown before
// sending a SIGKILL.
const KillGrace = 5 * time.Second
// WaitTimeout waits for the given command to finish with a timeout.
// It assumes the command has already been started.
// If the command times out, it attempts to kill the process.
func WaitTimeout(c *exec.Cmd, timeout time.Duration) error {
var kill *time.Timer
term := time.AfterFunc(timeout, func() {
err := syscall.Kill(-c.Process.Pid, syscall.SIGTERM)
if err != nil {
log.Printf("E! [agent] Error terminating process children: %s", err)
}
err = c.Process.Signal(syscall.SIGTERM)
if err != nil {
log.Printf("E! [agent] Error terminating process: %s", err)
return
}
kill = time.AfterFunc(KillGrace, func() {
err := syscall.Kill(-c.Process.Pid, syscall.SIGKILL)
if err != nil {
log.Printf("E! [agent] Error terminating process children: %s", err)
}
err = c.Process.Kill()
if err != nil {
log.Printf("E! [agent] Error killing process: %s", err)
return
}
})
})
err := c.Wait()
// Shutdown all timers
if kill != nil {
kill.Stop()
}
termSent := !term.Stop()
// If the process exited without error treat it as success. This allows a
// process to do a clean shutdown on signal.
if err == nil {
return nil
}
// If SIGTERM was sent then treat any process error as a timeout.
if termSent {
return ErrTimeout
}
// Otherwise there was an error unrelated to termination.
return err
}

41
internal/exec_windows.go Normal file
View file

@ -0,0 +1,41 @@
//go:build windows
package internal
import (
"log"
"os/exec"
"time"
)
// WaitTimeout waits for the given command to finish with a timeout.
// It assumes the command has already been started.
// If the command times out, it attempts to kill the process.
func WaitTimeout(c *exec.Cmd, timeout time.Duration) error {
timer := time.AfterFunc(timeout, func() {
err := c.Process.Kill()
if err != nil {
log.Printf("E! [agent] Error killing process: %s", err)
return
}
})
err := c.Wait()
// Shutdown all timers
termSent := !timer.Stop()
// If the process exited without error treat it as success. This allows a
// process to do a clean shutdown on signal.
if err == nil {
return nil
}
// If SIGTERM was sent then treat any process error as a timeout.
if termSent {
return ErrTimeout
}
// Otherwise there was an error unrelated to termination.
return err
}

42
internal/fuzz/json.go Normal file
View file

@ -0,0 +1,42 @@
package fuzz
// https://github.com/google/AFL/blob/master/dictionaries/json.dict
var JSONDictionary = []string{
"0",
",0",
":0",
"0:",
"-1.2e+3",
"true",
"false",
"null",
"\"\"",
",\"\"",
":\"\"",
"\"\":",
"{}",
",{}",
":{}",
"{\"\":0}",
"{{}}",
"[]",
",[]",
":[]",
"[0]",
"[[]]",
"''",
"\\",
"\\b",
"\\f",
"\\n",
"\\r",
"\\t",
"\\u0000",
"\\x00",
"\\0",
"\\uD800\\uDC00",
"\\uDBFF\\uDFFF",
"\"\":0",
"//",
"/**/",
}

View file

@ -0,0 +1,97 @@
package globpath
import (
"os"
"path/filepath"
"strings"
"github.com/bmatcuk/doublestar/v3"
"github.com/gobwas/glob"
)
type GlobPath struct {
path string
hasMeta bool
HasSuperMeta bool
rootGlob string
g glob.Glob
}
func Compile(path string) (*GlobPath, error) {
out := GlobPath{
hasMeta: hasMeta(path),
HasSuperMeta: hasSuperMeta(path),
path: filepath.FromSlash(path),
}
// if there are no glob meta characters in the path, don't bother compiling
// a glob object
if !out.hasMeta || !out.HasSuperMeta {
return &out, nil
}
// find the root elements of the object path, the entry point for recursion
// when you have a super-meta in your path (which are :
// glob(/your/expression/until/first/star/of/super-meta))
out.rootGlob = path[:strings.Index(path, "**")+1]
var err error
if out.g, err = glob.Compile(path, os.PathSeparator); err != nil {
return nil, err
}
return &out, nil
}
// Match returns all files matching the expression.
// If it's a static path, returns path.
// All returned path will have the host platform separator.
func (g *GlobPath) Match() []string {
// This string replacement is for backwards compatibility support
// The original implementation allowed **.txt but the double star package requires **/**.txt
g.path = strings.ReplaceAll(g.path, "**/**", "**")
g.path = strings.ReplaceAll(g.path, "**", "**/**")
//nolint:errcheck // pattern is known
files, _ := doublestar.Glob(g.path)
return files
}
// MatchString tests the path string against the glob. The path should contain
// the host platform separator.
func (g *GlobPath) MatchString(path string) bool {
if !g.HasSuperMeta {
//nolint:errcheck // pattern is known
res, _ := filepath.Match(g.path, path)
return res
}
return g.g.Match(path)
}
// GetRoots returns a list of files and directories which should be optimal
// prefixes of matching files when you have a super-meta in your expression :
// - any directory under these roots may contain a matching file
// - no file outside of these roots can match the pattern
// Note that it returns both files and directories.
// All returned path will have the host platform separator.
func (g *GlobPath) GetRoots() []string {
if !g.hasMeta {
return []string{g.path}
}
if !g.HasSuperMeta {
//nolint:errcheck // pattern is known
matches, _ := filepath.Glob(g.path)
return matches
}
//nolint:errcheck // pattern is known
roots, _ := filepath.Glob(g.rootGlob)
return roots
}
// hasMeta reports whether path contains any magic glob characters.
func hasMeta(path string) bool {
return strings.ContainsAny(path, "*?[")
}
// hasSuperMeta reports whether path contains any super magic glob characters (**).
func hasSuperMeta(path string) bool {
return strings.Contains(path, "**")
}

View file

@ -0,0 +1,124 @@
//go:build !windows
// TODO: Windows - should be enabled for Windows when super asterisk is fixed on Windows
// https://github.com/influxdata/telegraf/issues/6248
package globpath
import (
"os"
"path/filepath"
"runtime"
"testing"
"github.com/stretchr/testify/require"
)
var (
testdataDir = getTestdataDir()
)
func TestCompileAndMatch(t *testing.T) {
type test struct {
path string
matches int
}
tests := []test{
// test super asterisk
{path: filepath.Join(testdataDir, "**"), matches: 7},
// test single asterisk
{path: filepath.Join(testdataDir, "*.log"), matches: 3},
// test no meta characters (file exists)
{path: filepath.Join(testdataDir, "log1.log"), matches: 1},
// test file that doesn't exist
{path: filepath.Join(testdataDir, "i_dont_exist.log"), matches: 0},
// test super asterisk that doesn't exist
{path: filepath.Join(testdataDir, "dir_doesnt_exist", "**"), matches: 0},
// test exclamation mark creates non-matching list with a range
{path: filepath.Join(testdataDir, "log[!1-2]*"), matches: 1},
// test caret creates non-matching list
{path: filepath.Join(testdataDir, "log[^1-2]*"), matches: 1},
// test exclamation mark creates non-matching list without a range
{path: filepath.Join(testdataDir, "log[!2]*"), matches: 2},
// test exclamation mark creates non-matching list without a range
//nolint:gocritic // filepathJoin - '\\' used to escape in glob, not path separator
{path: filepath.Join(testdataDir, "log\\[!*"), matches: 1},
// test exclamation mark creates non-matching list without a range
//nolint:gocritic // filepathJoin - '\\' used to escape in glob, not path separator
{path: filepath.Join(testdataDir, "log\\[^*"), matches: 0},
}
for _, tc := range tests {
g, err := Compile(tc.path)
require.NoError(t, err)
matches := g.Match()
require.Len(t, matches, tc.matches)
}
}
func TestRootGlob(t *testing.T) {
tests := []struct {
input string
output string
}{
{filepath.Join(testdataDir, "**"), filepath.Join(testdataDir, "*")},
{filepath.Join(testdataDir, "nested?", "**"), filepath.Join(testdataDir, "nested?", "*")},
{filepath.Join(testdataDir, "ne**", "nest*"), filepath.Join(testdataDir, "ne*")},
{filepath.Join(testdataDir, "nested?", "*"), ""},
}
for _, test := range tests {
actual, err := Compile(test.input)
require.NoError(t, err)
require.Equal(t, actual.rootGlob, test.output)
}
}
func TestFindNestedTextFile(t *testing.T) {
// test super asterisk
g1, err := Compile(filepath.Join(testdataDir, "**.txt"))
require.NoError(t, err)
matches := g1.Match()
require.Len(t, matches, 1)
}
func TestMatch_ErrPermission(t *testing.T) {
tests := []struct {
input string
expected []string
}{
{"/root/foo", []string(nil)},
{"/root/f*", []string(nil)},
}
for _, test := range tests {
glob, err := Compile(test.input)
require.NoError(t, err)
actual := glob.Match()
require.Equal(t, test.expected, actual)
}
}
func TestWindowsSeparator(t *testing.T) {
//nolint:staticcheck // Silence linter for now as we plan to reenable tests for Windows later
if runtime.GOOS != "windows" {
t.Skip("Skipping Windows only test")
}
glob, err := Compile("testdata/nested1")
require.NoError(t, err)
ok := glob.MatchString("testdata\\nested1")
require.True(t, ok)
}
func getTestdataDir() string {
dir, err := os.Getwd()
if err != nil {
// if we cannot even establish the test directory, further progress is meaningless
panic(err)
}
return filepath.Join(dir, "testdata")
}

0
internal/globpath/testdata/log1.log vendored Normal file
View file

0
internal/globpath/testdata/log2.log vendored Normal file
View file

0
internal/globpath/testdata/log[!.log vendored Normal file
View file

View file

5
internal/globpath/testdata/test.conf vendored Normal file
View file

@ -0,0 +1,5 @@
# this is a fake testing config file
# for testing the filestat plugin
option1 = "foo"
option2 = "bar"

View file

@ -0,0 +1,9 @@
//go:build !goplugin
package goplugin
import "errors"
func LoadExternalPlugins(_ string) error {
return errors.New("go plugin support is not enabled")
}

View file

@ -0,0 +1,42 @@
//go:build goplugin
package goplugin
import (
"fmt"
"os"
"path"
"path/filepath"
"plugin"
"strings"
)
// loadExternalPlugins loads external plugins from shared libraries (.so, .dll, etc.)
// in the specified directory.
func LoadExternalPlugins(rootDir string) error {
return filepath.Walk(rootDir, func(pth string, info os.FileInfo, err error) error {
// Stop if there was an error.
if err != nil {
return err
}
// Ignore directories.
if info.IsDir() {
return nil
}
// Ignore files that aren't shared libraries.
ext := strings.ToLower(path.Ext(pth))
if ext != ".so" && ext != ".dll" {
return nil
}
// Load plugin.
_, err = plugin.Open(pth)
if err != nil {
return fmt.Errorf("error loading %s: %s", pth, err)
}
return nil
})
}

View file

@ -0,0 +1,7 @@
//go:build armbe || arm64be || mips || mips64 || mips64p32 || ppc || ppc64 || s390 || s390x || sparc || sparc64
package internal
import "encoding/binary"
var HostEndianness = binary.BigEndian

View file

@ -0,0 +1,7 @@
//go:build 386 || amd64 || amd64p32 || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || ppc64le || riscv || riscv64 || wasm
package internal
import "encoding/binary"
var HostEndianness = binary.LittleEndian

227
internal/http.go Normal file
View file

@ -0,0 +1,227 @@
package internal
import (
"crypto/subtle"
"errors"
"fmt"
"net"
"net/http"
"net/url"
"strings"
"github.com/golang-jwt/jwt/v5"
)
type BasicAuthErrorFunc func(rw http.ResponseWriter)
// JWTAuthHandler returns a http handler that requires the HTTP bearer auth
// token to be valid and match the given user.
func JWTAuthHandler(secret, username string, onError BasicAuthErrorFunc) func(h http.Handler) http.Handler {
return func(h http.Handler) http.Handler {
return &jwtAuthHandler{
secret: []byte(secret),
username: []byte(username),
onError: onError,
next: h,
}
}
}
type jwtAuthHandler struct {
secret []byte
username []byte
onError BasicAuthErrorFunc
next http.Handler
}
func (h *jwtAuthHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
authHeader := req.Header.Get("Authentication")
if !strings.HasPrefix(authHeader, "Bearer ") {
h.onError(rw)
http.Error(rw, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
return
}
bearer := strings.TrimPrefix(authHeader, "Bearer ")
token, err := jwt.Parse(bearer, func(t *jwt.Token) (interface{}, error) {
if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
return nil, fmt.Errorf("unexpected signing method: %v", t.Method)
}
return h.secret, nil
})
if err != nil || !token.Valid {
h.onError(rw)
if err != nil && errors.Is(err, jwt.ErrTokenExpired) {
http.Error(rw, "token expired", http.StatusUnauthorized)
} else if err != nil {
http.Error(rw, "invalid token: "+err.Error(), http.StatusUnauthorized)
} else {
http.Error(rw, "invalid token", http.StatusUnauthorized)
}
return
}
claims, ok := token.Claims.(jwt.MapClaims)
if !ok {
h.onError(rw)
http.Error(rw, "problem authenticating token", http.StatusInternalServerError)
return
}
username, ok := claims["username"].(string)
if !ok || username == "" {
h.onError(rw)
http.Error(rw, "token must contain a string username", http.StatusUnauthorized)
return
}
if subtle.ConstantTimeCompare([]byte(username), h.username) != 1 {
h.onError(rw)
http.Error(rw, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
return
}
h.next.ServeHTTP(rw, req)
}
// BasicAuthHandler returns a http handler that requires HTTP basic auth
// credentials to match the given username and password.
func BasicAuthHandler(username, password, realm string, onError BasicAuthErrorFunc) func(h http.Handler) http.Handler {
return func(h http.Handler) http.Handler {
return &basicAuthHandler{
username: username,
password: password,
realm: realm,
onError: onError,
next: h,
}
}
}
type basicAuthHandler struct {
username string
password string
realm string
onError BasicAuthErrorFunc
next http.Handler
}
func (h *basicAuthHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
if h.username == "" && h.password == "" {
h.next.ServeHTTP(rw, req)
return
}
var reqUsername, reqPassword string
var ok bool
authHeader := req.Header.Get("Authorization")
if strings.HasPrefix(authHeader, "Token ") {
token := strings.TrimPrefix(authHeader, "Token ")
reqUsername, reqPassword, ok = strings.Cut(token, ":")
} else {
reqUsername, reqPassword, ok = req.BasicAuth()
}
if !ok ||
subtle.ConstantTimeCompare([]byte(reqUsername), []byte(h.username)) != 1 ||
subtle.ConstantTimeCompare([]byte(reqPassword), []byte(h.password)) != 1 {
rw.Header().Set("WWW-Authenticate", "Basic realm=\""+h.realm+"\"")
h.onError(rw)
http.Error(rw, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
return
}
h.next.ServeHTTP(rw, req)
}
type GenericAuthErrorFunc func(rw http.ResponseWriter)
// GenericAuthHandler returns a http handler that requires `Authorization: <credentials>`
func GenericAuthHandler(credentials string, onError GenericAuthErrorFunc) func(h http.Handler) http.Handler {
return func(h http.Handler) http.Handler {
return &genericAuthHandler{
credentials: credentials,
onError: onError,
next: h,
}
}
}
// Generic auth scheme handler - exact match on `Authorization: <credentials>`
type genericAuthHandler struct {
credentials string
onError GenericAuthErrorFunc
next http.Handler
}
func (h *genericAuthHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
if h.credentials != "" {
// Scheme checking
authorization := req.Header.Get("Authorization")
if subtle.ConstantTimeCompare([]byte(authorization), []byte(h.credentials)) != 1 {
h.onError(rw)
http.Error(rw, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
return
}
}
h.next.ServeHTTP(rw, req)
}
// ErrorFunc is a callback for writing an error response.
type ErrorFunc func(rw http.ResponseWriter, code int)
// IPRangeHandler returns a http handler that requires the remote address to be
// in the specified network.
func IPRangeHandler(networks []*net.IPNet, onError ErrorFunc) func(h http.Handler) http.Handler {
return func(h http.Handler) http.Handler {
return &ipRangeHandler{
networks: networks,
onError: onError,
next: h,
}
}
}
type ipRangeHandler struct {
networks []*net.IPNet
onError ErrorFunc
next http.Handler
}
func (h *ipRangeHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
if len(h.networks) == 0 {
h.next.ServeHTTP(rw, req)
return
}
remoteIPString, _, err := net.SplitHostPort(req.RemoteAddr)
if err != nil {
h.onError(rw, http.StatusForbidden)
return
}
remoteIP := net.ParseIP(remoteIPString)
if remoteIP == nil {
h.onError(rw, http.StatusForbidden)
return
}
for _, network := range h.networks {
if network.Contains(remoteIP) {
h.next.ServeHTTP(rw, req)
return
}
}
h.onError(rw, http.StatusForbidden)
}
func OnClientError(client *http.Client, err error) {
// Close connection after a timeout error. If this is a HTTP2
// connection this ensures that next interval a new connection will be
// used and name lookup will be performed.
// https://github.com/golang/go/issues/36026
var urlErr *url.Error
if errors.As(err, &urlErr) && urlErr.Timeout() {
client.CloseIdleConnections()
}
}

441
internal/internal.go Normal file
View file

@ -0,0 +1,441 @@
package internal
import (
"bufio"
"compress/gzip"
"context"
crypto_rand "crypto/rand"
"errors"
"fmt"
"io"
"log"
"math/big"
"math/rand"
"os"
"os/exec"
"runtime"
"strings"
"sync"
"syscall"
"time"
"unicode"
"github.com/influxdata/telegraf/internal/choice"
)
const alphanum string = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
const NoMetricsCreatedMsg = "No metrics were created from a message. Verify your parser settings. This message is only printed once."
var once sync.Once
var (
ErrTimeout = errors.New("command timed out")
ErrNotImplemented = errors.New("not implemented yet")
)
// Set via LDFLAGS -X
var (
Version = "unknown"
Branch = ""
Commit = ""
)
type ReadWaitCloser struct {
pipeReader *io.PipeReader
wg sync.WaitGroup
}
func FormatFullVersion() string {
var parts = []string{"Telegraf"}
if Version != "" {
parts = append(parts, Version)
} else {
parts = append(parts, "unknown")
}
if Branch != "" || Commit != "" {
if Branch == "" {
Branch = "unknown"
}
if Commit == "" {
Commit = "unknown"
}
git := fmt.Sprintf("(git: %s@%s)", Branch, Commit)
parts = append(parts, git)
}
return strings.Join(parts, " ")
}
// ProductToken returns a tag for Telegraf that can be used in user agents.
func ProductToken() string {
return fmt.Sprintf("Telegraf/%s Go/%s",
Version, strings.TrimPrefix(runtime.Version(), "go"))
}
// ReadLines reads contents from a file and splits them by new lines.
func ReadLines(filename string) ([]string, error) {
f, err := os.Open(filename)
if err != nil {
return []string{""}, err
}
defer f.Close()
var ret []string
scanner := bufio.NewScanner(f)
for scanner.Scan() {
ret = append(ret, scanner.Text())
}
return ret, nil
}
// RandomString returns a random string of alphanumeric characters
func RandomString(n int) (string, error) {
var bytes = make([]byte, n)
_, err := crypto_rand.Read(bytes)
if err != nil {
return "", err
}
for i, b := range bytes {
bytes[i] = alphanum[b%byte(len(alphanum))]
}
return string(bytes), nil
}
// SnakeCase converts the given string to snake case following the Golang format:
// acronyms are converted to lower-case and preceded by an underscore.
func SnakeCase(in string) string {
runes := []rune(in)
length := len(runes)
var out []rune
for i := 0; i < length; i++ {
if i > 0 && unicode.IsUpper(runes[i]) {
prevLower := unicode.IsLower(runes[i-1])
nextLower := i+1 < length && unicode.IsLower(runes[i+1])
// Special case for plural acronyms
nextPlural := i+1 < length && runes[i+1] == 's'
if prevLower || (nextLower && !nextPlural) {
out = append(out, '_')
}
}
out = append(out, unicode.ToLower(runes[i]))
}
return string(out)
}
// RandomSleep will sleep for a random amount of time up to max.
// If the shutdown channel is closed, it will return before it has finished sleeping.
func RandomSleep(limit time.Duration, shutdown chan struct{}) {
sleepDuration := RandomDuration(limit)
if sleepDuration == 0 {
return
}
t := time.NewTimer(time.Nanosecond * sleepDuration)
select {
case <-t.C:
return
case <-shutdown:
t.Stop()
return
}
}
// RandomDuration returns a random duration between 0 and max.
func RandomDuration(limit time.Duration) time.Duration {
if limit == 0 {
return 0
}
return time.Duration(rand.Int63n(limit.Nanoseconds())) //nolint:gosec // G404: not security critical
}
// SleepContext sleeps until the context is closed or the duration is reached.
func SleepContext(ctx context.Context, duration time.Duration) error {
if duration == 0 {
return nil
}
t := time.NewTimer(duration)
select {
case <-t.C:
return nil
case <-ctx.Done():
t.Stop()
return ctx.Err()
}
}
// AlignDuration returns the duration until next aligned interval.
// If the current time is aligned a 0 duration is returned.
func AlignDuration(tm time.Time, interval time.Duration) time.Duration {
return AlignTime(tm, interval).Sub(tm)
}
// AlignTime returns the time of the next aligned interval.
// If the current time is aligned the current time is returned.
func AlignTime(tm time.Time, interval time.Duration) time.Time {
truncated := tm.Truncate(interval)
if truncated.Equal(tm) {
return tm
}
return truncated.Add(interval)
}
// ExitStatus takes the error from exec.Command
// and returns the exit status and true
// if error is not exit status, will return 0 and false
func ExitStatus(err error) (int, bool) {
var exitErr *exec.ExitError
if errors.As(err, &exitErr) {
if status, ok := exitErr.Sys().(syscall.WaitStatus); ok {
return status.ExitStatus(), true
}
}
return 0, false
}
func (r *ReadWaitCloser) Close() error {
err := r.pipeReader.Close()
r.wg.Wait() // wait for the gzip goroutine finish
return err
}
// CompressWithGzip takes an io.Reader as input and pipes it through a
// gzip.Writer returning an io.Reader containing the gzipped data.
// Errors occurring during compression are returned to the instance reading
// from the returned reader via through the corresponding read call
// (e.g. io.Copy or io.ReadAll).
func CompressWithGzip(data io.Reader) io.ReadCloser {
pipeReader, pipeWriter := io.Pipe()
gzipWriter := gzip.NewWriter(pipeWriter)
// Start copying from the uncompressed reader to the output reader
// in the background until the input reader is closed (or errors out).
go func() {
// This copy will block until "data" reached EOF or an error occurs
_, err := io.Copy(gzipWriter, data)
// Close the compression writer and make sure we do not overwrite
// the copy error if any.
gzipErr := gzipWriter.Close()
if err == nil {
err = gzipErr
}
// Subsequent reads from the output reader (connected to "pipeWriter"
// via pipe) will return the copy (or closing) error if any to the
// instance reading from the reader returned by the CompressWithGzip
// function. If "err" is nil, the below function will correctly report
// io.EOF.
pipeWriter.CloseWithError(err)
}()
// Return a reader which then can be read by the caller to collect the
// compressed stream.
return pipeReader
}
// ParseTimestamp parses a Time according to the standard Telegraf options.
// These are generally displayed in the toml similar to:
//
// json_time_key= "timestamp"
// json_time_format = "2006-01-02T15:04:05Z07:00"
// json_timezone = "America/Los_Angeles"
//
// The format can be one of "unix", "unix_ms", "unix_us", "unix_ns", or a Go
// time layout suitable for time.Parse.
//
// When using the "unix" format, an optional fractional component is allowed.
// Specific unix time precisions cannot have a fractional component.
//
// Unix times may be an int64, float64, or string. When using a Go format
// string the timestamp must be a string.
//
// The location is a location string suitable for time.LoadLocation. Unix
// times do not use the location string, a unix time is always return in the
// UTC location.
func ParseTimestamp(format string, timestamp interface{}, location *time.Location, separator ...string) (time.Time, error) {
switch format {
case "unix", "unix_ms", "unix_us", "unix_ns":
sep := []string{",", "."}
if len(separator) > 0 {
sep = separator
}
return parseUnix(format, timestamp, sep)
default:
v, ok := timestamp.(string)
if !ok {
return time.Unix(0, 0), errors.New("unsupported type")
}
return parseTime(format, v, location)
}
}
// parseTime parses a timestamp in unix format with different resolutions
func parseUnix(format string, timestamp interface{}, separator []string) (time.Time, error) {
// Extract the scaling factor to nanoseconds from "format"
var factor int64
switch format {
case "unix":
factor = int64(time.Second)
case "unix_ms":
factor = int64(time.Millisecond)
case "unix_us":
factor = int64(time.Microsecond)
case "unix_ns":
factor = int64(time.Nanosecond)
}
zero := time.Unix(0, 0)
// Convert the representation to time
switch v := timestamp.(type) {
case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
t, err := ToInt64(v)
if err != nil {
return zero, err
}
return time.Unix(0, t*factor).UTC(), nil
case float32, float64:
ts, err := ToFloat64(v)
if err != nil {
return zero, err
}
// Parse the float as a precise fraction to avoid precision loss
f := big.Rat{}
if f.SetFloat64(ts) == nil {
return zero, errors.New("invalid number")
}
return timeFromFraction(&f, factor), nil
case string:
// Sanitize the string to have no thousand separators and dot
// as decimal separator to ease later parsing
v = sanitizeTimestamp(v, separator)
// Parse the string as a precise fraction to avoid precision loss
f := big.Rat{}
if _, ok := f.SetString(v); !ok {
return zero, errors.New("invalid number")
}
return timeFromFraction(&f, factor), nil
}
return zero, errors.New("unsupported type")
}
func timeFromFraction(f *big.Rat, factor int64) time.Time {
// Extract the numerator and denominator and scale to nanoseconds
num := f.Num()
denom := f.Denom()
num.Mul(num, big.NewInt(factor))
// Get the integer (non-fractional part) of the timestamp and convert
// it into time
t := big.Int{}
t.Div(num, denom)
return time.Unix(0, t.Int64()).UTC()
}
// sanitizeTimestamp removes thousand separators and uses dot as
// decimal separator. Returns also a boolean indicating success.
func sanitizeTimestamp(timestamp string, decimalSeparator []string) string {
// Remove thousand-separators that are not used for decimal separation
sanitized := timestamp
for _, s := range []string{" ", ",", "."} {
if !choice.Contains(s, decimalSeparator) {
sanitized = strings.ReplaceAll(sanitized, s, "")
}
}
// Replace decimal separators by dot to have a standard, parsable format
for _, s := range decimalSeparator {
// Make sure we replace only the first occurrence of any separator.
if strings.Contains(sanitized, s) {
return strings.Replace(sanitized, s, ".", 1)
}
}
return sanitized
}
// parseTime parses a string timestamp according to the format string.
func parseTime(format, timestamp string, location *time.Location) (time.Time, error) {
loc := location
if loc == nil {
loc = time.UTC
}
switch strings.ToLower(format) {
case "ansic":
format = time.ANSIC
case "unixdate":
format = time.UnixDate
case "rubydate":
format = time.RubyDate
case "rfc822":
format = time.RFC822
case "rfc822z":
format = time.RFC822Z
case "rfc850":
format = time.RFC850
case "rfc1123":
format = time.RFC1123
case "rfc1123z":
format = time.RFC1123Z
case "rfc3339":
format = time.RFC3339
case "rfc3339nano":
format = time.RFC3339Nano
case "stamp":
format = time.Stamp
case "stampmilli":
format = time.StampMilli
case "stampmicro":
format = time.StampMicro
case "stampnano":
format = time.StampNano
}
if !strings.Contains(format, "MST") {
return time.ParseInLocation(format, timestamp, loc)
}
// Golang does not parse times with ambiguous timezone abbreviations,
// but only parses the time-fields and the timezone NAME with a zero
// offset (see https://groups.google.com/g/golang-nuts/c/hDMdnm_jUFQ/m/yeL9IHOsAQAJ).
// To handle those timezones correctly we can use the timezone-name and
// force parsing the time in that timezone. This way we get the correct
// time for the "most probably" of the ambiguous timezone-abbreviations.
ts, err := time.Parse(format, timestamp)
if err != nil {
return time.Time{}, err
}
zone, offset := ts.Zone()
if zone == "UTC" || offset != 0 {
return ts.In(loc), nil
}
once.Do(func() {
const msg = `Your config is using abbreviated timezones and parsing was changed in v1.27.0!
Please see the change log, remove any workarounds in place, and carefully
check your data timestamps! If case you experience any problems, please
file an issue on https://github.com/influxdata/telegraf/issues!`
log.Print("W! " + msg)
})
abbrevLoc, err := time.LoadLocation(zone)
if err != nil {
return time.Time{}, fmt.Errorf("cannot resolve timezone abbreviation %q: %w", zone, err)
}
ts, err = time.ParseInLocation(format, timestamp, abbrevLoc)
if err != nil {
return time.Time{}, err
}
return ts.In(loc), nil
}

784
internal/internal_test.go Normal file
View file

@ -0,0 +1,784 @@
package internal
import (
"bytes"
"compress/gzip"
"crypto/rand"
"io"
"log"
"os/exec"
"regexp"
"sync"
"testing"
"time"
"github.com/stretchr/testify/require"
)
type SnakeTest struct {
input string
output string
}
var tests = []SnakeTest{
{"a", "a"},
{"snake", "snake"},
{"A", "a"},
{"ID", "id"},
{"MOTD", "motd"},
{"Snake", "snake"},
{"SnakeTest", "snake_test"},
{"APIResponse", "api_response"},
{"SnakeID", "snake_id"},
{"SnakeIDGoogle", "snake_id_google"},
{"LinuxMOTD", "linux_motd"},
{"OMGWTFBBQ", "omgwtfbbq"},
{"omg_wtf_bbq", "omg_wtf_bbq"},
{"ConsumedLCUs", "consumed_lcus"},
}
func TestSnakeCase(t *testing.T) {
for _, test := range tests {
t.Run(test.input, func(t *testing.T) {
require.Equal(t, test.output, SnakeCase(test.input))
})
}
}
func TestRunTimeout(t *testing.T) {
t.Skip("Skipping test due to random failures & a data race when running test-all.")
sleepbin, err := exec.LookPath("sleep")
if err != nil || sleepbin == "" {
t.Skip("'sleep' binary not available on OS, skipping.")
}
cmd := exec.Command(sleepbin, "10")
start := time.Now()
err = RunTimeout(cmd, time.Millisecond*20)
elapsed := time.Since(start)
require.Equal(t, ErrTimeout, err)
// Verify that command gets killed in 20ms, with some breathing room
require.Less(t, elapsed, time.Millisecond*75)
}
// Verifies behavior of a command that doesn't get killed.
func TestRunTimeoutFastExit(t *testing.T) {
if testing.Short() {
t.Skip("Skipping test due to random failures.")
}
echobin, err := exec.LookPath("echo")
if err != nil || echobin == "" {
t.Skip("'echo' binary not available on OS, skipping.")
}
cmd := exec.Command(echobin)
start := time.Now()
err = RunTimeout(cmd, time.Millisecond*20)
buf := &bytes.Buffer{}
log.SetOutput(buf)
elapsed := time.Since(start)
require.NoError(t, err)
// Verify that command gets killed in 20ms, with some breathing room
require.Less(t, elapsed, time.Millisecond*75)
// Verify "process already finished" log doesn't occur.
time.Sleep(time.Millisecond * 75)
require.Empty(t, buf.String())
}
func TestCombinedOutputTimeout(t *testing.T) {
// TODO: Fix this test
t.Skip("Test failing too often, skip for now and revisit later.")
sleepbin, err := exec.LookPath("sleep")
if err != nil || sleepbin == "" {
t.Skip("'sleep' binary not available on OS, skipping.")
}
cmd := exec.Command(sleepbin, "10")
start := time.Now()
_, err = CombinedOutputTimeout(cmd, time.Millisecond*20)
elapsed := time.Since(start)
require.Equal(t, ErrTimeout, err)
// Verify that command gets killed in 20ms, with some breathing room
require.Less(t, elapsed, time.Millisecond*75)
}
func TestCombinedOutput(t *testing.T) {
echobin, err := exec.LookPath("echo")
if err != nil || echobin == "" {
t.Skip("'echo' binary not available on OS, skipping.")
}
cmd := exec.Command(echobin, "foo")
out, err := CombinedOutputTimeout(cmd, time.Second)
require.NoError(t, err)
require.Equal(t, "foo\n", string(out))
}
// test that CombinedOutputTimeout and exec.Cmd.CombinedOutput return
// the same output from a failed command.
func TestCombinedOutputError(t *testing.T) {
shell, err := exec.LookPath("sh")
if err != nil || shell == "" {
t.Skip("'sh' binary not available on OS, skipping.")
}
cmd := exec.Command(shell, "-c", "false")
expected, err := cmd.CombinedOutput()
require.Error(t, err)
cmd2 := exec.Command(shell, "-c", "false")
actual, err := CombinedOutputTimeout(cmd2, time.Second)
require.Error(t, err)
require.Equal(t, expected, actual)
}
func TestRunError(t *testing.T) {
shell, err := exec.LookPath("sh")
if err != nil || shell == "" {
t.Skip("'sh' binary not available on OS, skipping.")
}
cmd := exec.Command(shell, "-c", "false")
err = RunTimeout(cmd, time.Second)
require.Error(t, err)
}
func TestRandomSleep(t *testing.T) {
// TODO: Fix this test
t.Skip("Test failing too often, skip for now and revisit later.")
// test that zero max returns immediately
s := time.Now()
RandomSleep(time.Duration(0), make(chan struct{}))
elapsed := time.Since(s)
require.Less(t, elapsed, time.Millisecond)
// test that max sleep is respected
s = time.Now()
RandomSleep(time.Millisecond*50, make(chan struct{}))
elapsed = time.Since(s)
require.Less(t, elapsed, time.Millisecond*100)
// test that shutdown is respected
s = time.Now()
shutdown := make(chan struct{})
go func() {
time.Sleep(time.Millisecond * 100)
close(shutdown)
}()
RandomSleep(time.Second, shutdown)
elapsed = time.Since(s)
require.Less(t, elapsed, time.Millisecond*150)
}
func TestCompressWithGzip(t *testing.T) {
testData := "the quick brown fox jumps over the lazy dog"
inputBuffer := bytes.NewBufferString(testData)
outputBuffer := CompressWithGzip(inputBuffer)
gzipReader, err := gzip.NewReader(outputBuffer)
require.NoError(t, err)
defer gzipReader.Close()
output, err := io.ReadAll(gzipReader)
require.NoError(t, err)
require.Equal(t, testData, string(output))
}
type mockReader struct {
err error
ncalls uint64 // record the number of calls to Read
msg []byte
}
func (r *mockReader) Read(p []byte) (n int, err error) {
r.ncalls++
if len(r.msg) > 0 {
n, err = copy(p, r.msg), io.EOF
} else {
n, err = rand.Read(p)
}
if r.err == nil {
return n, err
}
return n, r.err
}
func TestCompressWithGzipEarlyClose(t *testing.T) {
mr := &mockReader{}
rc := CompressWithGzip(mr)
n, err := io.CopyN(io.Discard, rc, 10000)
require.NoError(t, err)
require.Equal(t, int64(10000), n)
r1 := mr.ncalls
require.NoError(t, rc.Close())
n, err = io.CopyN(io.Discard, rc, 10000)
require.ErrorIs(t, err, io.ErrClosedPipe)
require.Equal(t, int64(0), n)
r2 := mr.ncalls
// no more read to the source after closing
require.Equal(t, r1, r2)
}
func TestCompressWithGzipErrorPropagationCopy(t *testing.T) {
errs := []error{io.ErrClosedPipe, io.ErrNoProgress, io.ErrUnexpectedEOF}
for _, expected := range errs {
r := &mockReader{msg: []byte("this is a test"), err: expected}
rc := CompressWithGzip(r)
n, err := io.Copy(io.Discard, rc)
require.Positive(t, n)
require.ErrorIs(t, err, expected)
require.NoError(t, rc.Close())
}
}
func TestCompressWithGzipErrorPropagationReadAll(t *testing.T) {
errs := []error{io.ErrClosedPipe, io.ErrNoProgress, io.ErrUnexpectedEOF}
for _, expected := range errs {
r := &mockReader{msg: []byte("this is a test"), err: expected}
rc := CompressWithGzip(r)
buf, err := io.ReadAll(rc)
require.NotEmpty(t, buf)
require.ErrorIs(t, err, expected)
require.NoError(t, rc.Close())
}
}
func TestAlignDuration(t *testing.T) {
tests := []struct {
name string
now time.Time
interval time.Duration
expected time.Duration
}{
{
name: "aligned",
now: time.Date(2018, 1, 1, 1, 1, 0, 0, time.UTC),
interval: 10 * time.Second,
expected: 0 * time.Second,
},
{
name: "standard interval",
now: time.Date(2018, 1, 1, 1, 1, 1, 0, time.UTC),
interval: 10 * time.Second,
expected: 9 * time.Second,
},
{
name: "odd interval",
now: time.Date(2018, 1, 1, 1, 1, 1, 0, time.UTC),
interval: 3 * time.Second,
expected: 2 * time.Second,
},
{
name: "sub second interval",
now: time.Date(2018, 1, 1, 1, 1, 0, 5e8, time.UTC),
interval: 1 * time.Second,
expected: 500 * time.Millisecond,
},
{
name: "non divisible not aligned on minutes",
now: time.Date(2018, 1, 1, 1, 0, 0, 0, time.UTC),
interval: 1*time.Second + 100*time.Millisecond,
expected: 400 * time.Millisecond,
},
{
name: "long interval",
now: time.Date(2018, 1, 1, 1, 1, 0, 0, time.UTC),
interval: 1 * time.Hour,
expected: 59 * time.Minute,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
actual := AlignDuration(tt.now, tt.interval)
require.Equal(t, tt.expected, actual)
})
}
}
func TestAlignTime(t *testing.T) {
rfc3339 := func(value string) time.Time {
tt, err := time.Parse(time.RFC3339, value)
require.NoError(t, err)
return tt
}
tests := []struct {
name string
now time.Time
interval time.Duration
expected time.Time
}{
{
name: "aligned",
now: rfc3339("2018-01-01T01:01:00Z"),
interval: 10 * time.Second,
expected: rfc3339("2018-01-01T01:01:00Z"),
},
{
name: "aligned",
now: rfc3339("2018-01-01T01:01:01Z"),
interval: 10 * time.Second,
expected: rfc3339("2018-01-01T01:01:10Z"),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
actual := AlignTime(tt.now, tt.interval)
require.Equal(t, tt.expected, actual)
})
}
}
func TestParseTimestamp(t *testing.T) {
rfc3339 := func(value string) time.Time {
tm, err := time.Parse(time.RFC3339Nano, value)
require.NoError(t, err)
return tm
}
ansic := func(value string) time.Time {
tm, err := time.Parse(time.ANSIC, value)
require.NoError(t, err)
return tm
}
rubydate := func(value string) time.Time {
tm, err := time.Parse(time.RubyDate, value)
require.NoError(t, err)
return tm
}
rfc822z := func(value string) time.Time {
tm, err := time.Parse(time.RFC822Z, value)
require.NoError(t, err)
return tm
}
rfc1123z := func(value string) time.Time {
tm, err := time.Parse(time.RFC1123Z, value)
require.NoError(t, err)
return tm
}
rfc3339nano := func(value string) time.Time {
tm, err := time.Parse(time.RFC3339Nano, value)
require.NoError(t, err)
return tm
}
stamp := func(value string) time.Time {
tm, err := time.Parse(time.Stamp, value)
require.NoError(t, err)
return tm
}
stampmilli := func(value string) time.Time {
tm, err := time.Parse(time.StampMilli, value)
require.NoError(t, err)
return tm
}
stampmicro := func(value string) time.Time {
tm, err := time.Parse(time.StampMicro, value)
require.NoError(t, err)
return tm
}
stampnano := func(value string) time.Time {
tm, err := time.Parse(time.StampNano, value)
require.NoError(t, err)
return tm
}
tests := []struct {
name string
format string
timestamp interface{}
location string
separator []string
expected time.Time
}{
{
name: "parse layout string in utc",
format: "2006-01-02 15:04:05",
timestamp: "2019-02-20 21:50:34",
location: "UTC",
expected: rfc3339("2019-02-20T21:50:34Z"),
},
{
name: "layout regression 6386",
format: "02.01.2006 15:04:05",
timestamp: "09.07.2019 00:11:00",
expected: rfc3339("2019-07-09T00:11:00Z"),
},
{
name: "default location is utc",
format: "2006-01-02 15:04:05",
timestamp: "2019-02-20 21:50:34",
expected: rfc3339("2019-02-20T21:50:34Z"),
},
{
name: "unix seconds without fractional",
format: "unix",
timestamp: "1568338208",
expected: rfc3339("2019-09-13T01:30:08Z"),
},
{
name: "unix seconds with fractional",
format: "unix",
timestamp: "1568338208.500",
expected: rfc3339("2019-09-13T01:30:08.500Z"),
},
{
name: "unix seconds with fractional and comma decimal point",
format: "unix",
timestamp: "1568338208,500",
expected: rfc3339("2019-09-13T01:30:08.500Z"),
},
{
name: "unix seconds extra precision",
format: "unix",
timestamp: "1568338208.00000050042",
expected: rfc3339("2019-09-13T01:30:08.000000500Z"),
},
{
name: "unix seconds with thousand separator only (dot)",
format: "unix",
timestamp: "1.568.338.208",
separator: []string{","},
expected: rfc3339("2019-09-13T01:30:08Z"),
},
{
name: "unix seconds with thousand separator only (comma)",
format: "unix",
timestamp: "1,568,338,208",
separator: []string{"."},
expected: rfc3339("2019-09-13T01:30:08Z"),
},
{
name: "unix seconds with thousand separator only (space)",
format: "unix",
timestamp: "1 568 338 208",
separator: []string{"."},
expected: rfc3339("2019-09-13T01:30:08Z"),
},
{
name: "unix seconds with thousand separator only (underscore)",
format: "unix",
timestamp: "1_568_338_208",
separator: []string{"."},
expected: rfc3339("2019-09-13T01:30:08Z"),
},
{
name: "unix seconds with thousand and decimal separator (US)",
format: "unix",
timestamp: "1,568,338,208.500",
separator: []string{"."},
expected: rfc3339("2019-09-13T01:30:08.500Z"),
},
{
name: "unix seconds with thousand and decimal separator (EU)",
format: "unix",
timestamp: "1.568.338.208,500",
separator: []string{","},
expected: rfc3339("2019-09-13T01:30:08.500Z"),
},
{
name: "unix seconds integer",
format: "unix",
timestamp: int64(1568338208),
expected: rfc3339("2019-09-13T01:30:08Z"),
},
{
name: "unix seconds float",
format: "unix",
timestamp: float64(1568338208.500),
expected: rfc3339("2019-09-13T01:30:08.500Z"),
},
{
name: "unix seconds float exponential",
format: "unix",
timestamp: float64(1.5683382085e+9),
expected: rfc3339("2019-09-13T01:30:08.500Z"),
},
{
name: "unix milliseconds",
format: "unix_ms",
timestamp: "1568338208500",
expected: rfc3339("2019-09-13T01:30:08.500Z"),
},
{
name: "unix milliseconds with fractional",
format: "unix_ms",
timestamp: "1568338208500.42",
expected: rfc3339("2019-09-13T01:30:08.50042Z"),
},
{
name: "unix microseconds",
format: "unix_us",
timestamp: "1568338208000500",
expected: rfc3339("2019-09-13T01:30:08.000500Z"),
},
{
name: "unix nanoseconds",
format: "unix_ns",
timestamp: "1568338208000000500",
expected: rfc3339("2019-09-13T01:30:08.000000500Z"),
},
{
name: "unix nanoseconds exponential",
format: "unix_ns",
timestamp: "1.5683382080000005e+18",
expected: rfc3339("2019-09-13T01:30:08.000000500Z"),
},
{
name: "rfc339 test",
format: "RFC3339",
timestamp: "2018-10-26T13:30:33Z",
expected: rfc3339("2018-10-26T13:30:33Z"),
},
{
name: "ANSIC",
format: "ANSIC",
timestamp: "Mon Jan 2 15:04:05 2006",
expected: ansic("Mon Jan 2 15:04:05 2006"),
},
{
name: "UnixDate",
format: "UnixDate",
timestamp: "Mon Jan 2 15:04:05 MST 2006",
expected: time.Unix(1136239445, 0),
location: "Local",
},
{
name: "RubyDate",
format: "RubyDate",
timestamp: "Mon Jan 02 15:04:05 -0700 2006",
expected: rubydate("Mon Jan 02 15:04:05 -0700 2006"),
location: "Local",
},
{
name: "RFC822",
format: "RFC822",
timestamp: "02 Jan 06 15:04 MST",
expected: time.Unix(1136239440, 0),
location: "Local",
},
{
name: "RFC822Z",
format: "RFC822Z",
timestamp: "02 Jan 06 15:04 -0700",
expected: rfc822z("02 Jan 06 15:04 -0700"),
location: "Local",
},
{
name: "RFC850",
format: "RFC850",
timestamp: "Monday, 02-Jan-06 15:04:05 MST",
expected: time.Unix(1136239445, 0),
location: "Local",
},
{
name: "RFC1123",
format: "RFC1123",
timestamp: "Mon, 02 Jan 2006 15:04:05 MST",
expected: time.Unix(1136239445, 0),
location: "Local",
},
{
name: "RFC1123Z",
format: "RFC1123Z",
timestamp: "Mon, 02 Jan 2006 15:04:05 -0700",
expected: rfc1123z("Mon, 02 Jan 2006 15:04:05 -0700"),
location: "Local",
},
{
name: "RFC3339Nano",
format: "RFC3339Nano",
timestamp: "2006-01-02T15:04:05.999999999-07:00",
expected: rfc3339nano("2006-01-02T15:04:05.999999999-07:00"),
location: "Local",
},
{
name: "Stamp",
format: "Stamp",
timestamp: "Jan 2 15:04:05",
expected: stamp("Jan 2 15:04:05"),
},
{
name: "StampMilli",
format: "StampMilli",
timestamp: "Jan 2 15:04:05.000",
expected: stampmilli("Jan 2 15:04:05.000"),
},
{
name: "StampMicro",
format: "StampMicro",
timestamp: "Jan 2 15:04:05.000000",
expected: stampmicro("Jan 2 15:04:05.000000"),
},
{
name: "StampNano",
format: "StampNano",
timestamp: "Jan 2 15:04:05.000000000",
expected: stampnano("Jan 2 15:04:05.000000000"),
},
{
name: "RFC850",
format: "RFC850",
timestamp: "Monday, 02-Jan-06 15:04:05 MST",
expected: time.Unix(1136239445, 0),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Ensure any one-time warnings are printed for each test
once = sync.Once{}
// Ensure the warnings are captured and not to stdout
var buf bytes.Buffer
backup := log.Writer()
log.SetOutput(&buf)
defer log.SetOutput(backup)
var loc *time.Location
if tt.location != "" {
var err error
loc, err = time.LoadLocation(tt.location)
require.NoError(t, err)
}
tm, err := ParseTimestamp(tt.format, tt.timestamp, loc, tt.separator...)
require.NoError(t, err)
require.Equal(t, tt.expected.Unix(), tm.Unix())
})
}
}
func TestParseTimestampInvalid(t *testing.T) {
tests := []struct {
name string
format string
timestamp interface{}
expected string
}{
{
name: "too few digits",
format: "2006-01-02 15:04:05",
timestamp: "2019-02-20 21:50",
expected: "cannot parse \"\" as \":\"",
},
{
name: "invalid layout",
format: "rfc3399",
timestamp: "09.07.2019 00:11:00",
expected: "cannot parse \"09.07.2019 00:11:00\" as \"rfc\"",
},
{
name: "layout not matching time",
format: "rfc3339",
timestamp: "09.07.2019 00:11:00",
expected: "parsing time \"09.07.2019 00:11:00\" as \"2006-01-02T15:04:05Z07:00\": cannot parse",
},
{
name: "unix wrong type",
format: "unix",
timestamp: true,
expected: "unsupported type",
},
{
name: "unix multiple separators (dot)",
format: "unix",
timestamp: "1568338.208.500",
expected: "invalid number",
},
{
name: "unix multiple separators (comma)",
format: "unix",
timestamp: "1568338,208,500",
expected: "invalid number",
},
{
name: "unix multiple separators (mixed)",
format: "unix",
timestamp: "1,568,338,208.500",
expected: "invalid number",
},
{
name: "invalid timezone abbreviation",
format: "RFC850",
timestamp: "Monday, 02-Jan-06 15:04:05 CDT",
expected: "cannot resolve timezone abbreviation",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Ensure any one-time warnings are printed for each test
once = sync.Once{}
// Ensure the warnings are captured and not to stdout
var buf bytes.Buffer
backup := log.Writer()
log.SetOutput(&buf)
defer log.SetOutput(backup)
_, err := ParseTimestamp(tt.format, tt.timestamp, nil)
require.ErrorContains(t, err, tt.expected)
})
}
}
func TestTimestampAbbrevWarning(t *testing.T) {
// Ensure any one-time warnings are printed for each test
once = sync.Once{}
// Ensure the warnings are captured and not to stdout
var buf bytes.Buffer
backup := log.Writer()
log.SetOutput(&buf)
defer log.SetOutput(backup)
// Try multiple timestamps with abbreviated timezones in case a user
// is actually in one of the timezones.
ts, err := ParseTimestamp("RFC1123", "Mon, 02 Jan 2006 15:04:05 MST", nil)
require.NoError(t, err)
require.EqualValues(t, 1136239445, ts.Unix())
ts2, err := ParseTimestamp("RFC1123", "Mon, 02 Jan 2006 15:04:05 EST", nil)
require.NoError(t, err)
require.EqualValues(t, 1136232245, ts2.Unix())
require.Contains(t, buf.String(), "Your config is using abbreviated timezones and parsing was changed in v1.27.0")
}
func TestProductToken(t *testing.T) {
token := ProductToken()
// Telegraf version depends on the call to SetVersion, it cannot be set
// multiple times and is not thread-safe.
re := regexp.MustCompile(`^Telegraf/[^\s]+ Go/\d+.\d+(.\d+)?$`)
require.True(t, re.MatchString(token), token)
}

View file

@ -0,0 +1,59 @@
package limiter
import (
"sync"
"time"
)
// NewRateLimiter returns a rate limiter that will emit from the C
// channel only 'n' times every 'rate' seconds.
func NewRateLimiter(n int, rate time.Duration) *rateLimiter {
r := &rateLimiter{
C: make(chan bool),
rate: rate,
n: n,
shutdown: make(chan bool),
}
r.wg.Add(1)
go r.limiter()
return r
}
type rateLimiter struct {
C chan bool
rate time.Duration
n int
shutdown chan bool
wg sync.WaitGroup
}
func (r *rateLimiter) Stop() {
close(r.shutdown)
r.wg.Wait()
close(r.C)
}
func (r *rateLimiter) limiter() {
defer r.wg.Done()
ticker := time.NewTicker(r.rate)
defer ticker.Stop()
counter := 0
for {
select {
case <-r.shutdown:
return
case <-ticker.C:
counter = 0
default:
if counter < r.n {
select {
case r.C <- true:
counter++
case <-r.shutdown:
return
}
}
}
}
}

215
internal/process/process.go Normal file
View file

@ -0,0 +1,215 @@
package process
import (
"context"
"errors"
"fmt"
"io"
"os"
"os/exec"
"sync"
"sync/atomic"
"time"
"github.com/influxdata/telegraf"
)
// Process is a long-running process manager that will restart processes if they stop.
type Process struct {
Cmd *exec.Cmd
Stdin io.WriteCloser
Stdout io.ReadCloser
Stderr io.ReadCloser
ReadStdoutFn func(io.Reader)
ReadStderrFn func(io.Reader)
RestartDelay time.Duration
StopOnError bool
Log telegraf.Logger
name string
args []string
envs []string
pid int32
cancel context.CancelFunc
mainLoopWg sync.WaitGroup
sync.Mutex
}
// New creates a new process wrapper
func New(command, envs []string) (*Process, error) {
if len(command) == 0 {
return nil, errors.New("no command")
}
p := &Process{
RestartDelay: 5 * time.Second,
name: command[0],
args: make([]string, 0),
envs: envs,
}
if len(command) > 1 {
p.args = command[1:]
}
return p, nil
}
// Start the process. A &Process can only be started once. It will restart itself
// as necessary.
func (p *Process) Start() error {
ctx, cancel := context.WithCancel(context.Background())
p.cancel = cancel
if err := p.cmdStart(); err != nil {
return err
}
p.mainLoopWg.Add(1)
go func() {
defer p.mainLoopWg.Done()
if err := p.cmdLoop(ctx); err != nil {
p.Log.Errorf("Process quit with message: %v", err)
}
}()
return nil
}
// Stop is called when the process isn't needed anymore
func (p *Process) Stop() {
if p.cancel != nil {
// signal our intent to shut down and not restart the process
p.cancel()
}
// close stdin so the app can shut down gracefully.
if err := p.Stdin.Close(); err != nil && !errors.Is(err, os.ErrClosed) {
p.Log.Errorf("Stdin closed with message: %v", err)
}
p.mainLoopWg.Wait()
}
func (p *Process) Pid() int {
pid := atomic.LoadInt32(&p.pid)
return int(pid)
}
func (p *Process) State() (state *os.ProcessState, running bool) {
p.Lock()
defer p.Unlock()
return p.Cmd.ProcessState, p.Cmd.ProcessState.ExitCode() == -1
}
func (p *Process) cmdStart() error {
p.Cmd = exec.Command(p.name, p.args...)
if len(p.envs) > 0 {
p.Cmd.Env = append(os.Environ(), p.envs...)
}
var err error
p.Stdin, err = p.Cmd.StdinPipe()
if err != nil {
return fmt.Errorf("error opening stdin pipe: %w", err)
}
p.Stdout, err = p.Cmd.StdoutPipe()
if err != nil {
return fmt.Errorf("error opening stdout pipe: %w", err)
}
p.Stderr, err = p.Cmd.StderrPipe()
if err != nil {
return fmt.Errorf("error opening stderr pipe: %w", err)
}
p.Log.Infof("Starting process: %s %s", p.name, p.args)
if err := p.Cmd.Start(); err != nil {
return fmt.Errorf("error starting process: %w", err)
}
atomic.StoreInt32(&p.pid, int32(p.Cmd.Process.Pid))
return nil
}
// cmdLoop watches an already running process, restarting it when appropriate.
func (p *Process) cmdLoop(ctx context.Context) error {
for {
err := p.cmdWait(ctx)
if err != nil && p.StopOnError {
return err
}
if isQuitting(ctx) {
p.Log.Infof("Process %s shut down", p.Cmd.Path)
return nil
}
p.Log.Errorf("Process %s exited: %v", p.Cmd.Path, err)
p.Log.Infof("Restarting in %s...", p.RestartDelay)
select {
case <-ctx.Done():
return nil
case <-time.After(p.RestartDelay):
// Continue the loop and restart the process
if err := p.cmdStart(); err != nil {
return err
}
}
}
}
// cmdWait waits for the process to finish.
func (p *Process) cmdWait(ctx context.Context) error {
var wg sync.WaitGroup
if p.ReadStdoutFn == nil {
p.ReadStdoutFn = defaultReadPipe
}
if p.ReadStderrFn == nil {
p.ReadStderrFn = defaultReadPipe
}
processCtx, processCancel := context.WithCancel(context.Background())
defer processCancel()
wg.Add(1)
go func() {
p.ReadStdoutFn(p.Stdout)
wg.Done()
}()
wg.Add(1)
go func() {
p.ReadStderrFn(p.Stderr)
wg.Done()
}()
wg.Add(1)
go func() {
select {
case <-ctx.Done():
p.gracefulStop(processCtx, p.Cmd, 5*time.Second)
case <-processCtx.Done():
}
wg.Done()
}()
p.Lock()
err := p.Cmd.Wait()
p.Unlock()
processCancel()
wg.Wait()
return err
}
func isQuitting(ctx context.Context) bool {
return ctx.Err() != nil
}
func defaultReadPipe(r io.Reader) {
//nolint:errcheck // Discarding the data, no need to handle an error
io.Copy(io.Discard, r)
}

View file

@ -0,0 +1,27 @@
//go:build !windows
package process
import (
"context"
"os/exec"
"syscall"
"time"
)
func (p *Process) gracefulStop(ctx context.Context, cmd *exec.Cmd, timeout time.Duration) {
select {
case <-time.After(timeout):
if err := cmd.Process.Signal(syscall.SIGTERM); err != nil {
p.Log.Errorf("Error after sending SIGTERM signal to process: %v", err)
}
case <-ctx.Done():
}
select {
case <-time.After(timeout):
if err := cmd.Process.Kill(); err != nil {
p.Log.Errorf("Error after killing process: %v", err)
}
case <-ctx.Done():
}
}

View file

@ -0,0 +1,81 @@
//go:build !windows
package process
import (
"bufio"
"flag"
"fmt"
"io"
"os"
"sync/atomic"
"syscall"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf/testutil"
)
// test that a restarting process resets pipes properly
func TestRestartingRebindsPipes(t *testing.T) {
if testing.Short() {
t.Skip("Skipping long running test in short mode")
}
exe, err := os.Executable()
require.NoError(t, err)
p, err := New([]string{exe, "-external"}, []string{"INTERNAL_PROCESS_MODE=application"})
p.RestartDelay = 100 * time.Nanosecond
p.Log = testutil.Logger{}
require.NoError(t, err)
linesRead := int64(0)
p.ReadStdoutFn = func(r io.Reader) {
scanner := bufio.NewScanner(r)
for scanner.Scan() {
atomic.AddInt64(&linesRead, 1)
}
}
require.NoError(t, p.Start())
for atomic.LoadInt64(&linesRead) < 1 {
time.Sleep(1 * time.Millisecond)
}
require.NoError(t, syscall.Kill(p.Pid(), syscall.SIGKILL))
for atomic.LoadInt64(&linesRead) < 2 {
time.Sleep(1 * time.Millisecond)
}
// the mainLoopWg.Wait() call p.Stop() makes takes multiple seconds to complete
p.Stop()
}
var external = flag.Bool("external", false,
"if true, run externalProcess instead of tests")
func TestMain(m *testing.M) {
flag.Parse()
runMode := os.Getenv("INTERNAL_PROCESS_MODE")
if *external && runMode == "application" {
externalProcess()
os.Exit(0)
}
code := m.Run()
os.Exit(code)
}
// externalProcess is an external "misbehaving" process that won't exit
// cleanly.
func externalProcess() {
wait := make(chan int)
fmt.Fprintln(os.Stdout, "started")
<-wait
os.Exit(2) //nolint:revive // os.Exit called intentionally
}

View file

@ -0,0 +1,19 @@
//go:build windows
package process
import (
"context"
"os/exec"
"time"
)
func (p *Process) gracefulStop(ctx context.Context, cmd *exec.Cmd, timeout time.Duration) {
select {
case <-time.After(timeout):
if err := cmd.Process.Kill(); err != nil {
p.Log.Errorf("Error after killing process: %v", err)
}
case <-ctx.Done():
}
}

View file

@ -0,0 +1,183 @@
package rotate
// Rotating things
import (
"fmt"
"io"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"time"
)
// FilePerm defines the permissions that Writer will use for all
// the files it creates.
const (
FilePerm = os.FileMode(0644)
DateFormat = "2006-01-02"
)
// FileWriter implements the io.Writer interface and writes to the
// filename specified.
// Will rotate at the specified interval and/or when the current file size exceeds maxSizeInBytes
// At rotation time, current file is renamed and a new file is created.
// If the number of archives exceeds maxArchives, older files are deleted.
type FileWriter struct {
filename string
filenameRotationTemplate string
current *os.File
interval time.Duration
maxSizeInBytes int64
maxArchives int
expireTime time.Time
bytesWritten int64
sync.Mutex
}
// NewFileWriter creates a new file writer.
func NewFileWriter(filename string, interval time.Duration, maxSizeInBytes int64, maxArchives int) (io.WriteCloser, error) {
if interval == 0 && maxSizeInBytes <= 0 {
// No rotation needed so a basic io.Writer will do the trick
return openFile(filename)
}
w := &FileWriter{
filename: filename,
interval: interval,
maxSizeInBytes: maxSizeInBytes,
maxArchives: maxArchives,
filenameRotationTemplate: getFilenameRotationTemplate(filename),
}
if err := w.openCurrent(); err != nil {
return nil, err
}
return w, nil
}
func openFile(filename string) (*os.File, error) {
return os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_APPEND, FilePerm)
}
func getFilenameRotationTemplate(filename string) string {
// Extract the file extension
fileExt := filepath.Ext(filename)
// Remove the file extension from the filename (if any)
stem := strings.TrimSuffix(filename, fileExt)
return stem + ".%s-%s" + fileExt
}
// Write writes p to the current file, then checks to see if
// rotation is necessary.
func (w *FileWriter) Write(p []byte) (n int, err error) {
w.Lock()
defer w.Unlock()
if n, err = w.current.Write(p); err != nil {
return 0, err
}
w.bytesWritten += int64(n)
if err := w.rotateIfNeeded(); err != nil {
return 0, err
}
return n, nil
}
// Close closes the current file. Writer is unusable after this
// is called.
func (w *FileWriter) Close() (err error) {
w.Lock()
defer w.Unlock()
// Rotate before closing
if err := w.rotateIfNeeded(); err != nil {
return err
}
// Close the file if we did not rotate
if err := w.current.Close(); err != nil {
return err
}
w.current = nil
return nil
}
func (w *FileWriter) openCurrent() (err error) {
// In case ModTime() fails, we use time.Now()
w.expireTime = time.Now().Add(w.interval)
w.bytesWritten = 0
w.current, err = openFile(w.filename)
if err != nil {
return err
}
// Goal here is to rotate old pre-existing files.
// For that we use fileInfo.ModTime, instead of time.Now().
// Example: telegraf is restarted every 23 hours and
// the rotation interval is set to 24 hours.
// With time.now() as a reference we'd never rotate the file.
if fileInfo, err := w.current.Stat(); err == nil {
w.expireTime = fileInfo.ModTime().Add(w.interval)
w.bytesWritten = fileInfo.Size()
}
return w.rotateIfNeeded()
}
func (w *FileWriter) rotateIfNeeded() error {
if (w.interval > 0 && time.Now().After(w.expireTime)) ||
(w.maxSizeInBytes > 0 && w.bytesWritten >= w.maxSizeInBytes) {
if err := w.rotate(); err != nil {
// Ignore rotation errors and keep the log open
fmt.Printf("unable to rotate the file %q, %s", w.filename, err.Error())
}
return w.openCurrent()
}
return nil
}
func (w *FileWriter) rotate() (err error) {
if err := w.current.Close(); err != nil {
return err
}
// Use year-month-date for readability, unix time to make the file name unique with second precision
now := time.Now()
rotatedFilename := fmt.Sprintf(w.filenameRotationTemplate, now.Format(DateFormat), strconv.FormatInt(now.Unix(), 10))
if err := os.Rename(w.filename, rotatedFilename); err != nil {
return err
}
return w.purgeArchivesIfNeeded()
}
func (w *FileWriter) purgeArchivesIfNeeded() (err error) {
if w.maxArchives == -1 {
// Skip archiving
return nil
}
var matches []string
if matches, err = filepath.Glob(fmt.Sprintf(w.filenameRotationTemplate, "*", "*")); err != nil {
return err
}
// if there are more archives than the configured maximum, then purge older files
if len(matches) > w.maxArchives {
// sort files alphanumerically to delete older files first
sort.Strings(matches)
for _, filename := range matches[:len(matches)-w.maxArchives] {
if err := os.Remove(filename); err != nil {
return err
}
}
}
return nil
}

View file

@ -0,0 +1,150 @@
package rotate
import (
"os"
"path/filepath"
"testing"
"time"
"github.com/stretchr/testify/require"
)
func TestFileWriter_NoRotation(t *testing.T) {
tempDir := t.TempDir()
writer, err := NewFileWriter(filepath.Join(tempDir, "test"), 0, 0, 0)
require.NoError(t, err)
t.Cleanup(func() { require.NoError(t, writer.Close()) })
_, err = writer.Write([]byte("Hello World"))
require.NoError(t, err)
_, err = writer.Write([]byte("Hello World 2"))
require.NoError(t, err)
files, err := os.ReadDir(tempDir)
require.NoError(t, err)
require.Len(t, files, 1)
}
func TestFileWriter_TimeRotation(t *testing.T) {
tempDir := t.TempDir()
interval, err := time.ParseDuration("10ms")
require.NoError(t, err)
writer, err := NewFileWriter(filepath.Join(tempDir, "test"), interval, 0, -1)
require.NoError(t, err)
t.Cleanup(func() { require.NoError(t, writer.Close()) })
_, err = writer.Write([]byte("Hello World"))
require.NoError(t, err)
time.Sleep(interval)
_, err = writer.Write([]byte("Hello World 2"))
require.NoError(t, err)
files, err := os.ReadDir(tempDir)
require.NoError(t, err)
require.Len(t, files, 2)
}
func TestFileWriter_ReopenTimeRotation(t *testing.T) {
tempDir := t.TempDir()
interval, err := time.ParseDuration("10ms")
require.NoError(t, err)
filePath := filepath.Join(tempDir, "test.log")
err = os.WriteFile(filePath, []byte("Hello World"), 0640)
time.Sleep(interval)
require.NoError(t, err)
writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), interval, 0, -1)
require.NoError(t, err)
t.Cleanup(func() { require.NoError(t, writer.Close()) })
files, err := os.ReadDir(tempDir)
require.NoError(t, err)
require.Len(t, files, 2)
}
func TestFileWriter_SizeRotation(t *testing.T) {
tempDir := t.TempDir()
maxSize := int64(9)
writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), 0, maxSize, -1)
require.NoError(t, err)
t.Cleanup(func() { require.NoError(t, writer.Close()) })
_, err = writer.Write([]byte("Hello World"))
require.NoError(t, err)
_, err = writer.Write([]byte("World 2"))
require.NoError(t, err)
files, err := os.ReadDir(tempDir)
require.NoError(t, err)
require.Len(t, files, 2)
}
func TestFileWriter_ReopenSizeRotation(t *testing.T) {
tempDir := t.TempDir()
maxSize := int64(12)
filePath := filepath.Join(tempDir, "test.log")
err := os.WriteFile(filePath, []byte("Hello World"), 0640)
require.NoError(t, err)
writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), 0, maxSize, -1)
require.NoError(t, err)
t.Cleanup(func() { require.NoError(t, writer.Close()) })
_, err = writer.Write([]byte("Hello World Again"))
require.NoError(t, err)
files, err := os.ReadDir(tempDir)
require.NoError(t, err)
require.Len(t, files, 2)
}
func TestFileWriter_DeleteArchives(t *testing.T) {
if testing.Short() {
t.Skip("Skipping long test in short mode")
}
tempDir := t.TempDir()
maxSize := int64(5)
writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), 0, maxSize, 2)
require.NoError(t, err)
t.Cleanup(func() { require.NoError(t, writer.Close()) })
_, err = writer.Write([]byte("First file"))
require.NoError(t, err)
// File names include the date with second precision
// So, to force rotation with different file names
// we need to wait
time.Sleep(1 * time.Second)
_, err = writer.Write([]byte("Second file"))
require.NoError(t, err)
time.Sleep(1 * time.Second)
_, err = writer.Write([]byte("Third file"))
require.NoError(t, err)
files, err := os.ReadDir(tempDir)
require.NoError(t, err)
require.Len(t, files, 3)
for _, tempFile := range files {
var bytes []byte
var err error
path := filepath.Join(tempDir, tempFile.Name())
if bytes, err = os.ReadFile(path); err != nil {
t.Error(err.Error())
return
}
contents := string(bytes)
if contents != "" && contents != "Second file" && contents != "Third file" {
t.Error("Should have deleted the eldest log file")
return
}
}
}
func TestFileWriter_CloseDoesNotRotate(t *testing.T) {
tempDir := t.TempDir()
maxSize := int64(9)
writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), 0, maxSize, -1)
require.NoError(t, err)
require.NoError(t, writer.Close())
files, err := os.ReadDir(tempDir)
require.NoError(t, err)
require.Len(t, files, 1)
require.Regexp(t, "^test.log$", files[0].Name())
}

53
internal/snmp/config.go Normal file
View file

@ -0,0 +1,53 @@
package snmp
import (
"time"
"github.com/influxdata/telegraf/config"
)
type ClientConfig struct {
// Timeout to wait for a response.
Timeout config.Duration `toml:"timeout"`
Retries int `toml:"retries"`
Version uint8 `toml:"version"`
UnconnectedUDPSocket bool `toml:"unconnected_udp_socket"`
// Parameters for Version 1 & 2
Community string `toml:"community"`
// Parameters for Version 2 & 3
MaxRepetitions uint32 `toml:"max_repetitions"`
// Parameters for Version 3
ContextName string `toml:"context_name"`
SecLevel string `toml:"sec_level"`
SecName string `toml:"sec_name"`
AuthProtocol string `toml:"auth_protocol"`
AuthPassword config.Secret `toml:"auth_password"`
PrivProtocol string `toml:"priv_protocol"`
PrivPassword config.Secret `toml:"priv_password"`
EngineID string `toml:"-"`
EngineBoots uint32 `toml:"-"`
EngineTime uint32 `toml:"-"`
// Path to mib files
Path []string `toml:"path"`
Translator string `toml:"-"`
}
func DefaultClientConfig() *ClientConfig {
return &ClientConfig{
Timeout: config.Duration(5 * time.Second),
Retries: 3,
Version: 2,
Path: []string{"/usr/share/snmp/mibs"},
Translator: "gosmi",
Community: "public",
MaxRepetitions: 10,
SecLevel: "authNoPriv",
SecName: "myuser",
AuthProtocol: "MD5",
AuthPassword: config.NewSecret([]byte("pass")),
}
}

327
internal/snmp/field.go Normal file
View file

@ -0,0 +1,327 @@
package snmp
import (
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"math"
"net"
"strconv"
"strings"
"unicode/utf8"
"github.com/gosnmp/gosnmp"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
)
// Field holds the configuration for a Field to look up.
type Field struct {
// Name will be the name of the field.
Name string
// OID is prefix for this field. The plugin will perform a walk through all
// OIDs with this as their parent. For each value found, the plugin will strip
// off the OID prefix, and use the remainder as the index. For multiple fields
// to show up in the same row, they must share the same index.
Oid string
// OidIndexSuffix is the trailing sub-identifier on a table record OID that will be stripped off to get the record's index.
OidIndexSuffix string
// OidIndexLength specifies the length of the index in OID path segments. It can be used to remove sub-identifiers that vary in content or length.
OidIndexLength int
// IsTag controls whether this OID is output as a tag or a value.
IsTag bool
// Conversion controls any type conversion that is done on the value.
// "float"/"float(0)" will convert the value into a float.
// "float(X)" will convert the value into a float, and then move the decimal before Xth right-most digit.
// "int" will convert the value into an integer.
// "hwaddr" will convert a 6-byte string to a MAC address.
// "ipaddr" will convert the value to an IPv4 or IPv6 address.
// "enum"/"enum(1)" will convert the value according to its syntax. (Only supported with gosmi translator)
// "displayhint" will format the value according to the textual convention. (Only supported with gosmi translator)
Conversion string
// Translate tells if the value of the field should be snmptranslated
Translate bool
// Secondary index table allows to merge data from two tables with different index
// that this filed will be used to join them. There can be only one secondary index table.
SecondaryIndexTable bool
// This field is using secondary index, and will be later merged with primary index
// using SecondaryIndexTable. SecondaryIndexTable and SecondaryIndexUse are exclusive.
SecondaryIndexUse bool
// Controls if entries from secondary table should be added or not if joining
// index is present or not. I set to true, means that join is outer, and
// index is prepended with "Secondary." for missing values to avoid overlapping
// indexes from both tables.
// Can be set per field or globally with SecondaryIndexTable, global true overrides
// per field false.
SecondaryOuterJoin bool
initialized bool
translator Translator
}
// init() converts OID names to numbers, and sets the .Name attribute if unset.
func (f *Field) Init(tr Translator) error {
if f.initialized {
return nil
}
f.translator = tr
// check if oid needs translation or name is not set
if strings.ContainsAny(f.Oid, ":abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") || f.Name == "" {
_, oidNum, oidText, conversion, err := f.translator.SnmpTranslate(f.Oid)
if err != nil {
return fmt.Errorf("translating: %w", err)
}
f.Oid = oidNum
if f.Name == "" {
f.Name = oidText
}
if f.Conversion == "" {
f.Conversion = conversion
}
}
if f.SecondaryIndexTable && f.SecondaryIndexUse {
return errors.New("fields SecondaryIndexTable and UseSecondaryIndex are exclusive")
}
if !f.SecondaryIndexTable && !f.SecondaryIndexUse && f.SecondaryOuterJoin {
return errors.New("field SecondaryOuterJoin set to true, but field is not being used in join")
}
switch f.Conversion {
case "hwaddr", "enum(1)":
config.PrintOptionValueDeprecationNotice("inputs.snmp", "field.conversion", f.Conversion, telegraf.DeprecationInfo{
Since: "1.33.0",
Notice: "Use 'displayhint' instead",
})
}
f.initialized = true
return nil
}
// fieldConvert converts from any type according to the conv specification
func (f *Field) Convert(ent gosnmp.SnmpPDU) (interface{}, error) {
v := ent.Value
// snmptranslate table field value here
if f.Translate {
if entOid, ok := v.(string); ok {
_, _, oidText, _, err := f.translator.SnmpTranslate(entOid)
if err == nil {
// If no error translating, the original value should be replaced
v = oidText
}
}
}
if f.Conversion == "" {
// OctetStrings may contain hex data that needs its own conversion
if ent.Type == gosnmp.OctetString && !utf8.Valid(v.([]byte)[:]) {
return hex.EncodeToString(v.([]byte)), nil
}
if bs, ok := v.([]byte); ok {
return string(bs), nil
}
return v, nil
}
var d int
if _, err := fmt.Sscanf(f.Conversion, "float(%d)", &d); err == nil || f.Conversion == "float" {
switch vt := v.(type) {
case float32:
v = float64(vt) / math.Pow10(d)
case float64:
v = vt / math.Pow10(d)
case int:
v = float64(vt) / math.Pow10(d)
case int8:
v = float64(vt) / math.Pow10(d)
case int16:
v = float64(vt) / math.Pow10(d)
case int32:
v = float64(vt) / math.Pow10(d)
case int64:
v = float64(vt) / math.Pow10(d)
case uint:
v = float64(vt) / math.Pow10(d)
case uint8:
v = float64(vt) / math.Pow10(d)
case uint16:
v = float64(vt) / math.Pow10(d)
case uint32:
v = float64(vt) / math.Pow10(d)
case uint64:
v = float64(vt) / math.Pow10(d)
case []byte:
vf, err := strconv.ParseFloat(string(vt), 64)
if err != nil {
return nil, fmt.Errorf("failed to convert field to float with value %s: %w", vt, err)
}
v = vf / math.Pow10(d)
case string:
vf, err := strconv.ParseFloat(vt, 64)
if err != nil {
return nil, fmt.Errorf("failed to convert field to float with value %s: %w", vt, err)
}
v = vf / math.Pow10(d)
}
return v, nil
}
if f.Conversion == "int" {
var err error
switch vt := v.(type) {
case float32:
v = int64(vt)
case float64:
v = int64(vt)
case int:
v = int64(vt)
case int8:
v = int64(vt)
case int16:
v = int64(vt)
case int32:
v = int64(vt)
case int64:
v = vt
case uint:
v = int64(vt)
case uint8:
v = int64(vt)
case uint16:
v = int64(vt)
case uint32:
v = int64(vt)
case uint64:
v = int64(vt)
case []byte:
v, err = strconv.ParseInt(string(vt), 10, 64)
case string:
v, err = strconv.ParseInt(vt, 10, 64)
}
return v, err
}
// Deprecated: Use displayhint instead
if f.Conversion == "hwaddr" {
switch vt := v.(type) {
case string:
v = net.HardwareAddr(vt).String()
case []byte:
v = net.HardwareAddr(vt).String()
default:
return nil, fmt.Errorf("invalid type (%T) for hwaddr conversion", vt)
}
return v, nil
}
if f.Conversion == "hex" {
switch vt := v.(type) {
case string:
switch ent.Type {
case gosnmp.IPAddress:
ip := net.ParseIP(vt)
if ip4 := ip.To4(); ip4 != nil {
v = hex.EncodeToString(ip4)
} else {
v = hex.EncodeToString(ip)
}
default:
return nil, fmt.Errorf("unsupported Asn1BER (%#v) for hex conversion", ent.Type)
}
case []byte:
v = hex.EncodeToString(vt)
default:
return nil, fmt.Errorf("unsupported type (%T) for hex conversion", vt)
}
return v, nil
}
split := strings.Split(f.Conversion, ":")
if split[0] == "hextoint" && len(split) == 3 {
endian := split[1]
bit := split[2]
bv, ok := v.([]byte)
if !ok {
return v, nil
}
var b []byte
switch bit {
case "uint64":
b = make([]byte, 8)
case "uint32":
b = make([]byte, 4)
case "uint16":
b = make([]byte, 2)
default:
return nil, fmt.Errorf("invalid bit value (%s) for hex to int conversion", bit)
}
copy(b, bv)
var byteOrder binary.ByteOrder
switch endian {
case "LittleEndian":
byteOrder = binary.LittleEndian
case "BigEndian":
byteOrder = binary.BigEndian
default:
return nil, fmt.Errorf("invalid Endian value (%s) for hex to int conversion", endian)
}
switch bit {
case "uint64":
v = byteOrder.Uint64(b)
case "uint32":
v = byteOrder.Uint32(b)
case "uint16":
v = byteOrder.Uint16(b)
}
return v, nil
}
if f.Conversion == "ipaddr" {
var ipbs []byte
switch vt := v.(type) {
case string:
ipbs = []byte(vt)
case []byte:
ipbs = vt
default:
return nil, fmt.Errorf("invalid type (%T) for ipaddr conversion", vt)
}
switch len(ipbs) {
case 4, 16:
v = net.IP(ipbs).String()
default:
return nil, fmt.Errorf("invalid length (%d) for ipaddr conversion", len(ipbs))
}
return v, nil
}
if f.Conversion == "enum" {
return f.translator.SnmpFormatEnum(ent.Name, ent.Value, false)
}
// Deprecated: Use displayhint instead
if f.Conversion == "enum(1)" {
return f.translator.SnmpFormatEnum(ent.Name, ent.Value, true)
}
if f.Conversion == "displayhint" {
return f.translator.SnmpFormatDisplayHint(ent.Name, ent.Value)
}
return nil, fmt.Errorf("invalid conversion type %q", f.Conversion)
}

252
internal/snmp/field_test.go Normal file
View file

@ -0,0 +1,252 @@
package snmp
import (
"testing"
"github.com/gosnmp/gosnmp"
"github.com/stretchr/testify/require"
)
func TestConvertDefault(t *testing.T) {
tests := []struct {
name string
ent gosnmp.SnmpPDU
expected interface{}
errmsg string
}{
{
name: "integer",
ent: gosnmp.SnmpPDU{
Type: gosnmp.Integer,
Value: int(2),
},
expected: 2,
},
{
name: "octet string with valid bytes",
ent: gosnmp.SnmpPDU{
Type: gosnmp.OctetString,
Value: []byte{0x48, 0x65, 0x6C, 0x6C, 0x6F, 0x20, 0x77, 0x6F, 0x72, 0x6C, 0x64},
},
expected: "Hello world",
},
{
name: "octet string with invalid bytes",
ent: gosnmp.SnmpPDU{
Type: gosnmp.OctetString,
Value: []byte{0x84, 0xc8, 0x7, 0xff, 0xfd, 0x38, 0x54, 0xc1},
},
expected: "84c807fffd3854c1",
},
}
f := Field{}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
actual, err := f.Convert(tt.ent)
if tt.errmsg != "" {
require.ErrorContains(t, err, tt.errmsg)
} else {
require.NoError(t, err)
}
require.Equal(t, tt.expected, actual)
})
}
t.Run("invalid", func(t *testing.T) {
f.Conversion = "invalid"
actual, err := f.Convert(gosnmp.SnmpPDU{})
require.Nil(t, actual)
require.ErrorContains(t, err, "invalid conversion type")
})
}
func TestConvertHex(t *testing.T) {
tests := []struct {
name string
ent gosnmp.SnmpPDU
expected interface{}
errmsg string
}{
{
name: "octet string with valid bytes",
ent: gosnmp.SnmpPDU{
Type: gosnmp.OctetString,
Value: []byte{0x48, 0x65, 0x6C, 0x6C, 0x6F, 0x20, 0x77, 0x6F, 0x72, 0x6C, 0x64},
},
expected: "48656c6c6f20776f726c64",
},
{
name: "octet string with invalid bytes",
ent: gosnmp.SnmpPDU{
Type: gosnmp.OctetString,
Value: []byte{0x84, 0xc8, 0x7, 0xff, 0xfd, 0x38, 0x54, 0xc1},
},
expected: "84c807fffd3854c1",
},
{
name: "IPv4",
ent: gosnmp.SnmpPDU{
Type: gosnmp.IPAddress,
Value: "192.0.2.1",
},
expected: "c0000201",
},
{
name: "IPv6",
ent: gosnmp.SnmpPDU{
Type: gosnmp.IPAddress,
Value: "2001:db8::1",
},
expected: "20010db8000000000000000000000001",
},
{
name: "oid",
ent: gosnmp.SnmpPDU{
Type: gosnmp.ObjectIdentifier,
Value: ".1.2.3",
},
errmsg: "unsupported Asn1BER (0x6) for hex conversion",
},
{
name: "integer",
ent: gosnmp.SnmpPDU{
Type: gosnmp.Integer,
Value: int(2),
},
errmsg: "unsupported type (int) for hex conversion",
},
}
f := Field{Conversion: "hex"}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
actual, err := f.Convert(tt.ent)
if tt.errmsg != "" {
require.ErrorContains(t, err, tt.errmsg)
} else {
require.NoError(t, err)
}
require.Equal(t, tt.expected, actual)
})
}
}
func TestConvertHextoint(t *testing.T) {
tests := []struct {
name string
conversion string
ent gosnmp.SnmpPDU
expected interface{}
errmsg string
}{
{
name: "empty",
conversion: "hextoint:BigEndian:uint64",
ent: gosnmp.SnmpPDU{},
expected: nil,
},
{
name: "big endian uint64",
conversion: "hextoint:BigEndian:uint64",
ent: gosnmp.SnmpPDU{
Type: gosnmp.OctetString,
Value: []byte{0x84, 0xc8, 0x7, 0xff, 0xfd, 0x38, 0x54, 0xc1},
},
expected: uint64(0x84c807fffd3854c1),
},
{
name: "big endian uint32",
conversion: "hextoint:BigEndian:uint32",
ent: gosnmp.SnmpPDU{
Type: gosnmp.OctetString,
Value: []byte{0x84, 0xc8, 0x7, 0xff},
},
expected: uint32(0x84c807ff),
},
{
name: "big endian uint16",
conversion: "hextoint:BigEndian:uint16",
ent: gosnmp.SnmpPDU{
Type: gosnmp.OctetString,
Value: []byte{0x84, 0xc8},
},
expected: uint16(0x84c8),
},
{
name: "big endian invalid",
conversion: "hextoint:BigEndian:invalid",
ent: gosnmp.SnmpPDU{Type: gosnmp.OctetString, Value: make([]uint8, 0)},
errmsg: "invalid bit value",
},
{
name: "little endian uint64",
conversion: "hextoint:LittleEndian:uint64",
ent: gosnmp.SnmpPDU{
Type: gosnmp.OctetString,
Value: []byte{0x84, 0xc8, 0x7, 0xff, 0xfd, 0x38, 0x54, 0xc1},
},
expected: uint64(0xc15438fdff07c884),
},
{
name: "little endian uint32",
conversion: "hextoint:LittleEndian:uint32",
ent: gosnmp.SnmpPDU{
Type: gosnmp.OctetString,
Value: []byte{0x84, 0xc8, 0x7, 0xff},
},
expected: uint32(0xff07c884),
},
{
name: "little endian uint16",
conversion: "hextoint:LittleEndian:uint16",
ent: gosnmp.SnmpPDU{
Type: gosnmp.OctetString,
Value: []byte{0x84, 0xc8},
},
expected: uint16(0xc884),
},
{
name: "little endian single byte",
conversion: "hextoint:LittleEndian:uint16",
ent: gosnmp.SnmpPDU{
Type: gosnmp.OctetString,
Value: []byte{0x84},
},
expected: uint16(0x84),
},
{
name: "little endian invalid",
conversion: "hextoint:LittleEndian:invalid",
ent: gosnmp.SnmpPDU{Type: gosnmp.OctetString, Value: make([]byte, 0)},
errmsg: "invalid bit value",
},
{
name: "invalid",
conversion: "hextoint:invalid:uint64",
ent: gosnmp.SnmpPDU{Type: gosnmp.OctetString, Value: make([]byte, 0)},
errmsg: "invalid Endian value",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
f := Field{Conversion: tt.conversion}
actual, err := f.Convert(tt.ent)
if tt.errmsg != "" {
require.ErrorContains(t, err, tt.errmsg)
} else {
require.NoError(t, err)
}
require.Equal(t, tt.expected, actual)
})
}
}

140
internal/snmp/mib_loader.go Normal file
View file

@ -0,0 +1,140 @@
package snmp
import (
"fmt"
"os"
"path/filepath"
"sync"
"github.com/sleepinggenius2/gosmi"
"github.com/influxdata/telegraf"
)
// must init, append path for each directory, load module for every file
// or gosmi will fail without saying why
var m sync.Mutex
var once sync.Once
var cache = make(map[string]bool)
type MibLoader interface {
// appendPath takes the path of a directory
appendPath(path string)
// loadModule takes the name of a file in one of the
// directories. Basename only, no relative or absolute path
loadModule(path string) error
}
type GosmiMibLoader struct{}
func (*GosmiMibLoader) appendPath(path string) {
m.Lock()
defer m.Unlock()
gosmi.AppendPath(path)
}
func (*GosmiMibLoader) loadModule(path string) error {
m.Lock()
defer m.Unlock()
_, err := gosmi.LoadModule(path)
return err
}
// will give all found folders to gosmi and load in all modules found in the folders
func LoadMibsFromPath(paths []string, log telegraf.Logger, loader MibLoader) error {
folders, err := walkPaths(paths, log)
if err != nil {
return err
}
for _, path := range folders {
loader.appendPath(path)
modules, err := os.ReadDir(path)
if err != nil {
log.Warnf("Can't read directory %v", modules)
continue
}
for _, entry := range modules {
info, err := entry.Info()
if err != nil {
log.Warnf("Couldn't get info for %v: %v", entry.Name(), err)
continue
}
if info.Mode()&os.ModeSymlink != 0 {
symlink := filepath.Join(path, info.Name())
target, err := filepath.EvalSymlinks(symlink)
if err != nil {
log.Warnf("Couldn't evaluate symbolic links for %v: %v", symlink, err)
continue
}
// replace symlink's info with the target's info
info, err = os.Lstat(target)
if err != nil {
log.Warnf("Couldn't stat target %v: %v", target, err)
continue
}
}
if info.Mode().IsRegular() {
err := loader.loadModule(info.Name())
if err != nil {
log.Warnf("Couldn't load module %v: %v", info.Name(), err)
continue
}
}
}
}
return nil
}
// should walk the paths given and find all folders
func walkPaths(paths []string, log telegraf.Logger) ([]string, error) {
once.Do(gosmi.Init)
folders := make([]string, 0)
for _, mibPath := range paths {
// Check if we loaded that path already and skip it if so
m.Lock()
cached := cache[mibPath]
cache[mibPath] = true
m.Unlock()
if cached {
continue
}
err := filepath.Walk(mibPath, func(path string, info os.FileInfo, err error) error {
if info == nil {
log.Warnf("No mibs found")
if os.IsNotExist(err) {
log.Warnf("MIB path doesn't exist: %q", mibPath)
} else if err != nil {
return err
}
return nil
}
if info.Mode()&os.ModeSymlink != 0 {
target, err := filepath.EvalSymlinks(path)
if err != nil {
log.Warnf("Couldn't evaluate symbolic links for %v: %v", path, err)
}
info, err = os.Lstat(target)
if err != nil {
log.Warnf("Couldn't stat target %v: %v", target, err)
}
path = target
}
if info.IsDir() {
folders = append(folders, path)
}
return nil
})
if err != nil {
return folders, fmt.Errorf("couldn't walk path %q: %w", mibPath, err)
}
}
return folders, nil
}

View file

@ -0,0 +1,87 @@
package snmp
import (
"path/filepath"
"runtime"
"testing"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf/testutil"
)
type TestingMibLoader struct {
folders []string
files []string
}
func (t *TestingMibLoader) appendPath(path string) {
t.folders = append(t.folders, path)
}
func (t *TestingMibLoader) loadModule(path string) error {
t.files = append(t.files, path)
return nil
}
func TestFolderLookup(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("Skipping on windows")
}
tests := []struct {
name string
mibPath [][]string
paths [][]string
files []string
}{
{
name: "loading folders",
mibPath: [][]string{{"testdata", "loadMibsFromPath", "root"}},
paths: [][]string{
{"testdata", "loadMibsFromPath", "root"},
{"testdata", "loadMibsFromPath", "root", "dirOne"},
{"testdata", "loadMibsFromPath", "root", "dirOne", "dirTwo"},
{"testdata", "loadMibsFromPath", "linkTarget"},
},
files: []string{"empty", "emptyFile"},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
loader := TestingMibLoader{}
var givenPath []string
for _, paths := range tt.mibPath {
rootPath := filepath.Join(paths...)
givenPath = append(givenPath, rootPath)
}
err := LoadMibsFromPath(givenPath, testutil.Logger{}, &loader)
require.NoError(t, err)
var folders []string
for _, pathSlice := range tt.paths {
path := filepath.Join(pathSlice...)
folders = append(folders, path)
}
require.Equal(t, folders, loader.folders)
require.Equal(t, tt.files, loader.files)
})
}
}
func TestMissingMibPath(t *testing.T) {
log := testutil.Logger{}
path := []string{"non-existing-directory"}
require.NoError(t, LoadMibsFromPath(path, log, &GosmiMibLoader{}))
}
func BenchmarkMibLoading(b *testing.B) {
log := testutil.Logger{}
path := []string{"testdata/gosmi"}
for i := 0; i < b.N; i++ {
require.NoError(b, LoadMibsFromPath(path, log, &GosmiMibLoader{}))
}
}

304
internal/snmp/table.go Normal file
View file

@ -0,0 +1,304 @@
package snmp
import (
"errors"
"fmt"
"strings"
"time"
"github.com/gosnmp/gosnmp"
)
// Table holds the configuration for a SNMP table.
type Table struct {
// Name will be the name of the measurement.
Name string
// Which tags to inherit from the top-level config.
InheritTags []string
// Adds each row's table index as a tag.
IndexAsTag bool
// Fields is the tags and values to look up.
Fields []Field `toml:"field"`
// OID for automatic field population.
// If provided, init() will populate Fields with all the table columns of the
// given OID.
Oid string
initialized bool
translator Translator
}
// RTable is the resulting table built from a Table.
type RTable struct {
// Name is the name of the field, copied from Table.Name.
Name string
// Time is the time the table was built.
Time time.Time
// Rows are the rows that were found, one row for each table OID index found.
Rows []RTableRow
}
// RTableRow is the resulting row containing all the OID values which shared
// the same index.
type RTableRow struct {
// Tags are all the Field values which had IsTag=true.
Tags map[string]string
// Fields are all the Field values which had IsTag=false.
Fields map[string]interface{}
}
// Init builds & initializes the nested fields.
func (t *Table) Init(tr Translator) error {
// makes sure oid or name is set in config file
// otherwise snmp will produce metrics with an empty name
if t.Oid == "" && t.Name == "" {
return errors.New("unnamed SNMP table in config file: one or both of the oid and name settings must be set")
}
if t.initialized {
return nil
}
t.translator = tr
if err := t.initBuild(); err != nil {
return err
}
secondaryIndexTablePresent := false
// initialize all the nested fields
for i := range t.Fields {
if err := t.Fields[i].Init(t.translator); err != nil {
return fmt.Errorf("initializing field %s: %w", t.Fields[i].Name, err)
}
if t.Fields[i].SecondaryIndexTable {
if secondaryIndexTablePresent {
return errors.New("only one field can be SecondaryIndexTable")
}
secondaryIndexTablePresent = true
}
}
t.initialized = true
return nil
}
// initBuild initializes the table if it has an OID configured. If so, the
// net-snmp tools will be used to look up the OID and auto-populate the table's
// fields.
func (t *Table) initBuild() error {
if t.Oid == "" {
return nil
}
_, _, oidText, fields, err := t.translator.SnmpTable(t.Oid)
if err != nil {
return err
}
if t.Name == "" {
t.Name = oidText
}
knownOIDs := make(map[string]bool, len(t.Fields))
for _, f := range t.Fields {
knownOIDs[f.Oid] = true
}
for _, f := range fields {
if !knownOIDs[f.Oid] {
t.Fields = append(t.Fields, f)
}
}
return nil
}
// Build retrieves all the fields specified in the table and constructs the RTable.
func (t Table) Build(gs Connection, walk bool) (*RTable, error) {
rows := make(map[string]RTableRow)
// translation table for secondary index (when performing join on two tables)
secIdxTab := make(map[string]string)
secGlobalOuterJoin := false
for i, f := range t.Fields {
if f.SecondaryIndexTable {
secGlobalOuterJoin = f.SecondaryOuterJoin
if i != 0 {
t.Fields[0], t.Fields[i] = t.Fields[i], t.Fields[0]
}
break
}
}
tagCount := 0
for _, f := range t.Fields {
if f.IsTag {
tagCount++
}
if len(f.Oid) == 0 {
return nil, fmt.Errorf("cannot have empty OID on field %s", f.Name)
}
var oid string
if f.Oid[0] == '.' {
oid = f.Oid
} else {
// make sure OID has "." because the BulkWalkAll results do, and the prefix needs to match
oid = "." + f.Oid
}
// ifv contains a mapping of table OID index to field value
ifv := make(map[string]interface{})
if !walk {
// This is used when fetching non-table fields. Fields configured a the top
// scope of the plugin.
// We fetch the fields directly, and add them to ifv as if the index were an
// empty string. This results in all the non-table fields sharing the same
// index, and being added on the same row.
if pkt, err := gs.Get([]string{oid}); err != nil {
if errors.Is(err, gosnmp.ErrUnknownSecurityLevel) {
return nil, errors.New("unknown security level (sec_level)")
} else if errors.Is(err, gosnmp.ErrUnknownUsername) {
return nil, errors.New("unknown username (sec_name)")
} else if errors.Is(err, gosnmp.ErrWrongDigest) {
return nil, errors.New("wrong digest (auth_protocol, auth_password)")
} else if errors.Is(err, gosnmp.ErrDecryption) {
return nil, errors.New("decryption error (priv_protocol, priv_password)")
}
return nil, fmt.Errorf("performing get on field %s: %w", f.Name, err)
} else if pkt != nil && len(pkt.Variables) > 0 && pkt.Variables[0].Type != gosnmp.NoSuchObject && pkt.Variables[0].Type != gosnmp.NoSuchInstance {
ent := pkt.Variables[0]
fv, err := f.Convert(ent)
if err != nil {
return nil, fmt.Errorf("converting %q (OID %s) for field %s: %w", ent.Value, ent.Name, f.Name, err)
}
ifv[""] = fv
}
} else {
err := gs.Walk(oid, func(ent gosnmp.SnmpPDU) error {
if len(ent.Name) <= len(oid) || ent.Name[:len(oid)+1] != oid+"." {
return &walkError{} // break the walk
}
idx := ent.Name[len(oid):]
if f.OidIndexSuffix != "" {
if !strings.HasSuffix(idx, f.OidIndexSuffix) {
// this entry doesn't match our OidIndexSuffix. skip it
return nil
}
idx = idx[:len(idx)-len(f.OidIndexSuffix)]
}
if f.OidIndexLength != 0 {
i := f.OidIndexLength + 1 // leading separator
idx = strings.Map(func(r rune) rune {
if r == '.' {
i--
}
if i < 1 {
return -1
}
return r
}, idx)
}
fv, err := f.Convert(ent)
if err != nil {
return &walkError{
msg: fmt.Sprintf("converting %q (OID %s) for field %s", ent.Value, ent.Name, f.Name),
err: err,
}
}
ifv[idx] = fv
return nil
})
if err != nil {
// Our callback always wraps errors in a walkError.
// If this error isn't a walkError, we know it's not
// from the callback
var walkErr *walkError
if !errors.As(err, &walkErr) {
return nil, fmt.Errorf("performing bulk walk for field %s: %w", f.Name, err)
}
}
}
for idx, v := range ifv {
if f.SecondaryIndexUse {
if newidx, ok := secIdxTab[idx]; ok {
idx = newidx
} else {
if !secGlobalOuterJoin && !f.SecondaryOuterJoin {
continue
}
idx = ".Secondary" + idx
}
}
rtr, ok := rows[idx]
if !ok {
rtr = RTableRow{}
rtr.Tags = make(map[string]string)
rtr.Fields = make(map[string]interface{})
rows[idx] = rtr
}
if t.IndexAsTag && idx != "" {
if idx[0] == '.' {
idx = idx[1:]
}
rtr.Tags["index"] = idx
}
// don't add an empty string
if vs, ok := v.(string); !ok || vs != "" {
if f.IsTag {
if ok {
rtr.Tags[f.Name] = vs
} else {
rtr.Tags[f.Name] = fmt.Sprintf("%v", v)
}
} else {
rtr.Fields[f.Name] = v
}
if f.SecondaryIndexTable {
// indexes are stored here with prepending "." so we need to add them if needed
var vss string
if ok {
vss = "." + vs
} else {
vss = fmt.Sprintf(".%v", v)
}
if idx[0] == '.' {
secIdxTab[vss] = idx
} else {
secIdxTab[vss] = "." + idx
}
}
}
}
}
rt := RTable{
Name: t.Name,
Time: time.Now(), // TODO record time at start
Rows: make([]RTableRow, 0, len(rows)),
}
for _, r := range rows {
rt.Rows = append(rt.Rows, r)
}
return &rt, nil
}
type walkError struct {
msg string
err error
}
func (e *walkError) Error() string {
return e.msg
}
func (e *walkError) Unwrap() error {
return e.err
}

246
internal/snmp/table_test.go Normal file
View file

@ -0,0 +1,246 @@
package snmp
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestTableJoin_walk(t *testing.T) {
tbl := Table{
Name: "mytable",
IndexAsTag: true,
Fields: []Field{
{
Name: "myfield1",
Oid: ".1.0.0.3.1.1",
IsTag: true,
},
{
Name: "myfield2",
Oid: ".1.0.0.3.1.2",
},
{
Name: "myfield3",
Oid: ".1.0.0.3.1.3",
SecondaryIndexTable: true,
},
{
Name: "myfield4",
Oid: ".1.0.0.0.1.1",
SecondaryIndexUse: true,
IsTag: true,
},
{
Name: "myfield5",
Oid: ".1.0.0.0.1.2",
SecondaryIndexUse: true,
},
},
}
tb, err := tbl.Build(tsc, true)
require.NoError(t, err)
require.Equal(t, "mytable", tb.Name)
rtr1 := RTableRow{
Tags: map[string]string{
"myfield1": "instance",
"myfield4": "bar",
"index": "10",
},
Fields: map[string]interface{}{
"myfield2": 10,
"myfield3": 1,
"myfield5": 2,
},
}
rtr2 := RTableRow{
Tags: map[string]string{
"myfield1": "instance2",
"index": "11",
},
Fields: map[string]interface{}{
"myfield2": 20,
"myfield3": 2,
"myfield5": 0,
},
}
rtr3 := RTableRow{
Tags: map[string]string{
"myfield1": "instance3",
"index": "12",
},
Fields: map[string]interface{}{
"myfield2": 20,
"myfield3": 3,
},
}
require.Len(t, tb.Rows, 3)
require.Contains(t, tb.Rows, rtr1)
require.Contains(t, tb.Rows, rtr2)
require.Contains(t, tb.Rows, rtr3)
}
func TestTableOuterJoin_walk(t *testing.T) {
tbl := Table{
Name: "mytable",
IndexAsTag: true,
Fields: []Field{
{
Name: "myfield1",
Oid: ".1.0.0.3.1.1",
IsTag: true,
},
{
Name: "myfield2",
Oid: ".1.0.0.3.1.2",
},
{
Name: "myfield3",
Oid: ".1.0.0.3.1.3",
SecondaryIndexTable: true,
SecondaryOuterJoin: true,
},
{
Name: "myfield4",
Oid: ".1.0.0.0.1.1",
SecondaryIndexUse: true,
IsTag: true,
},
{
Name: "myfield5",
Oid: ".1.0.0.0.1.2",
SecondaryIndexUse: true,
},
},
}
tb, err := tbl.Build(tsc, true)
require.NoError(t, err)
require.Equal(t, "mytable", tb.Name)
rtr1 := RTableRow{
Tags: map[string]string{
"myfield1": "instance",
"myfield4": "bar",
"index": "10",
},
Fields: map[string]interface{}{
"myfield2": 10,
"myfield3": 1,
"myfield5": 2,
},
}
rtr2 := RTableRow{
Tags: map[string]string{
"myfield1": "instance2",
"index": "11",
},
Fields: map[string]interface{}{
"myfield2": 20,
"myfield3": 2,
"myfield5": 0,
},
}
rtr3 := RTableRow{
Tags: map[string]string{
"myfield1": "instance3",
"index": "12",
},
Fields: map[string]interface{}{
"myfield2": 20,
"myfield3": 3,
},
}
rtr4 := RTableRow{
Tags: map[string]string{
"index": "Secondary.0",
"myfield4": "foo",
},
Fields: map[string]interface{}{
"myfield5": 1,
},
}
require.Len(t, tb.Rows, 4)
require.Contains(t, tb.Rows, rtr1)
require.Contains(t, tb.Rows, rtr2)
require.Contains(t, tb.Rows, rtr3)
require.Contains(t, tb.Rows, rtr4)
}
func TestTableJoinNoIndexAsTag_walk(t *testing.T) {
tbl := Table{
Name: "mytable",
IndexAsTag: false,
Fields: []Field{
{
Name: "myfield1",
Oid: ".1.0.0.3.1.1",
IsTag: true,
},
{
Name: "myfield2",
Oid: ".1.0.0.3.1.2",
},
{
Name: "myfield3",
Oid: ".1.0.0.3.1.3",
SecondaryIndexTable: true,
},
{
Name: "myfield4",
Oid: ".1.0.0.0.1.1",
SecondaryIndexUse: true,
IsTag: true,
},
{
Name: "myfield5",
Oid: ".1.0.0.0.1.2",
SecondaryIndexUse: true,
},
},
}
tb, err := tbl.Build(tsc, true)
require.NoError(t, err)
require.Equal(t, "mytable", tb.Name)
rtr1 := RTableRow{
Tags: map[string]string{
"myfield1": "instance",
"myfield4": "bar",
// "index": "10",
},
Fields: map[string]interface{}{
"myfield2": 10,
"myfield3": 1,
"myfield5": 2,
},
}
rtr2 := RTableRow{
Tags: map[string]string{
"myfield1": "instance2",
// "index": "11",
},
Fields: map[string]interface{}{
"myfield2": 20,
"myfield3": 2,
"myfield5": 0,
},
}
rtr3 := RTableRow{
Tags: map[string]string{
"myfield1": "instance3",
// "index": "12",
},
Fields: map[string]interface{}{
"myfield2": 20,
"myfield3": 3,
},
}
require.Len(t, tb.Rows, 3)
require.Contains(t, tb.Rows, rtr1)
require.Contains(t, tb.Rows, rtr2)
require.Contains(t, tb.Rows, rtr3)
}

1467
internal/snmp/testdata/gosmi/bridgeMib vendored Normal file

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,554 @@
SNMPv2-SMI DEFINITIONS ::= BEGIN
-- the path to the root
org OBJECT IDENTIFIER ::= { iso 3 } -- "iso" = 1
dod OBJECT IDENTIFIER ::= { org 6 }
internet OBJECT IDENTIFIER ::= { dod 1 }
directory OBJECT IDENTIFIER ::= { internet 1 }
mgmt OBJECT IDENTIFIER ::= { internet 2 }
mib-2 OBJECT IDENTIFIER ::= { mgmt 1 }
transmission OBJECT IDENTIFIER ::= { mib-2 10 }
experimental OBJECT IDENTIFIER ::= { internet 3 }
private OBJECT IDENTIFIER ::= { internet 4 }
enterprises OBJECT IDENTIFIER ::= { private 1 }
security OBJECT IDENTIFIER ::= { internet 5 }
snmpV2 OBJECT IDENTIFIER ::= { internet 6 }
-- transport domains
snmpDomains OBJECT IDENTIFIER ::= { snmpV2 1 }
-- transport proxies
snmpProxys OBJECT IDENTIFIER ::= { snmpV2 2 }
-- module identities
snmpModules OBJECT IDENTIFIER ::= { snmpV2 3 }
-- Extended UTCTime, to allow dates with four-digit years
-- (Note that this definition of ExtUTCTime is not to be IMPORTed
-- by MIB modules.)
ExtUTCTime ::= OCTET STRING(SIZE(11 | 13))
-- format is YYMMDDHHMMZ or YYYYMMDDHHMMZ
-- where: YY - last two digits of year (only years
-- between 1900-1999)
-- YYYY - last four digits of the year (any year)
-- MM - month (01 through 12)
-- DD - day of month (01 through 31)
-- HH - hours (00 through 23)
-- MM - minutes (00 through 59)
-- Z - denotes GMT (the ASCII character Z)
--
-- For example, "9502192015Z" and "199502192015Z" represent
-- 8:15pm GMT on 19 February 1995. Years after 1999 must use
-- the four digit year format. Years 1900-1999 may use the
-- two or four digit format.
-- definitions for information modules
MODULE-IDENTITY MACRO ::=
BEGIN
TYPE NOTATION ::=
"LAST-UPDATED" value(Update ExtUTCTime)
"ORGANIZATION" Text
"CONTACT-INFO" Text
"DESCRIPTION" Text
RevisionPart
VALUE NOTATION ::=
value(VALUE OBJECT IDENTIFIER)
RevisionPart ::=
Revisions
| empty
Revisions ::=
Revision
| Revisions Revision
Revision ::=
"REVISION" value(Update ExtUTCTime)
"DESCRIPTION" Text
-- a character string as defined in section 3.1.1
Text ::= value(IA5String)
END
OBJECT-IDENTITY MACRO ::=
BEGIN
TYPE NOTATION ::=
"STATUS" Status
"DESCRIPTION" Text
ReferPart
VALUE NOTATION ::=
value(VALUE OBJECT IDENTIFIER)
Status ::=
"current"
| "deprecated"
| "obsolete"
ReferPart ::=
"REFERENCE" Text
| empty
-- a character string as defined in section 3.1.1
Text ::= value(IA5String)
END
-- names of objects
-- (Note that these definitions of ObjectName and NotificationName
-- are not to be IMPORTed by MIB modules.)
ObjectName ::=
OBJECT IDENTIFIER
NotificationName ::=
OBJECT IDENTIFIER
-- syntax of objects
-- the "base types" defined here are:
-- 3 built-in ASN.1 types: INTEGER, OCTET STRING, OBJECT IDENTIFIER
-- 8 application-defined types: Integer32, IpAddress, Counter32,
-- Gauge32, Unsigned32, TimeTicks, Opaque, and Counter64
ObjectSyntax ::=
CHOICE {
simple
SimpleSyntax,
-- note that SEQUENCEs for conceptual tables and
-- rows are not mentioned here...
application-wide
ApplicationSyntax
}
-- built-in ASN.1 types
SimpleSyntax ::=
CHOICE {
-- INTEGERs with a more restrictive range
-- may also be used
integer-value -- includes Integer32
INTEGER (-2147483648..2147483647),
-- OCTET STRINGs with a more restrictive size
-- may also be used
string-value
OCTET STRING (SIZE (0..65535)),
objectID-value
OBJECT IDENTIFIER
}
-- indistinguishable from INTEGER, but never needs more than
-- 32-bits for a two's complement representation
Integer32 ::=
INTEGER (-2147483648..2147483647)
-- application-wide types
ApplicationSyntax ::=
CHOICE {
ipAddress-value
IpAddress,
counter-value
Counter32,
timeticks-value
TimeTicks,
arbitrary-value
Opaque,
big-counter-value
Counter64,
unsigned-integer-value -- includes Gauge32
Unsigned32
}
-- in network-byte order
-- (this is a tagged type for historical reasons)
IpAddress ::=
[APPLICATION 0]
IMPLICIT OCTET STRING (SIZE (4))
-- this wraps
Counter32 ::=
[APPLICATION 1]
IMPLICIT INTEGER (0..4294967295)
-- this doesn't wrap
Gauge32 ::=
[APPLICATION 2]
IMPLICIT INTEGER (0..4294967295)
-- an unsigned 32-bit quantity
-- indistinguishable from Gauge32
Unsigned32 ::=
[APPLICATION 2]
IMPLICIT INTEGER (0..4294967295)
-- hundredths of seconds since an epoch
TimeTicks ::=
[APPLICATION 3]
IMPLICIT INTEGER (0..4294967295)
-- for backward-compatibility only
Opaque ::=
[APPLICATION 4]
IMPLICIT OCTET STRING
-- for counters that wrap in less than one hour with only 32 bits
Counter64 ::=
[APPLICATION 6]
IMPLICIT INTEGER (0..18446744073709551615)
-- definition for objects
OBJECT-TYPE MACRO ::=
BEGIN
TYPE NOTATION ::=
"SYNTAX" Syntax
UnitsPart
"MAX-ACCESS" Access
"STATUS" Status
"DESCRIPTION" Text
ReferPart
IndexPart
DefValPart
VALUE NOTATION ::=
value(VALUE ObjectName)
Syntax ::= -- Must be one of the following:
-- a base type (or its refinement),
-- a textual convention (or its refinement), or
-- a BITS pseudo-type
type
| "BITS" "{" NamedBits "}"
NamedBits ::= NamedBit
| NamedBits "," NamedBit
NamedBit ::= identifier "(" number ")" -- number is nonnegative
UnitsPart ::=
"UNITS" Text
| empty
Access ::=
"not-accessible"
| "accessible-for-notify"
| "read-only"
| "read-write"
| "read-create"
Status ::=
"current"
| "deprecated"
| "obsolete"
ReferPart ::=
"REFERENCE" Text
| empty
IndexPart ::=
"INDEX" "{" IndexTypes "}"
| "AUGMENTS" "{" Entry "}"
| empty
IndexTypes ::=
IndexType
| IndexTypes "," IndexType
IndexType ::=
"IMPLIED" Index
| Index
Index ::=
-- use the SYNTAX value of the
-- correspondent OBJECT-TYPE invocation
value(ObjectName)
Entry ::=
-- use the INDEX value of the
-- correspondent OBJECT-TYPE invocation
value(ObjectName)
DefValPart ::= "DEFVAL" "{" Defvalue "}"
| empty
Defvalue ::= -- must be valid for the type specified in
-- SYNTAX clause of same OBJECT-TYPE macro
value(ObjectSyntax)
| "{" BitsValue "}"
BitsValue ::= BitNames
| empty
BitNames ::= BitName
| BitNames "," BitName
BitName ::= identifier
-- a character string as defined in section 3.1.1
Text ::= value(IA5String)
END
-- definitions for notifications
NOTIFICATION-TYPE MACRO ::=
BEGIN
TYPE NOTATION ::=
ObjectsPart
"STATUS" Status
"DESCRIPTION" Text
ReferPart
VALUE NOTATION ::=
value(VALUE NotificationName)
ObjectsPart ::=
"OBJECTS" "{" Objects "}"
| empty
Objects ::=
Object
| Objects "," Object
Object ::=
value(ObjectName)
Status ::=
"current"
| "deprecated"
| "obsolete"
ReferPart ::=
"REFERENCE" Text
| empty
-- a character string as defined in section 3.1.1
Text ::= value(IA5String)
END
-- definitions of administrative identifiers
zeroDotZero OBJECT-IDENTITY
STATUS current
DESCRIPTION
"A value used for null identifiers."
::= { 0 0 }
TEXTUAL-CONVENTION MACRO ::=
BEGIN
TYPE NOTATION ::=
DisplayPart
"STATUS" Status
"DESCRIPTION" Text
ReferPart
"SYNTAX" Syntax
VALUE NOTATION ::=
value(VALUE Syntax) -- adapted ASN.1
DisplayPart ::=
"DISPLAY-HINT" Text
| empty
Status ::=
"current"
| "deprecated"
| "obsolete"
ReferPart ::=
"REFERENCE" Text
| empty
-- a character string as defined in [2]
Text ::= value(IA5String)
Syntax ::= -- Must be one of the following:
-- a base type (or its refinement), or
-- a BITS pseudo-type
type
| "BITS" "{" NamedBits "}"
NamedBits ::= NamedBit
| NamedBits "," NamedBit
NamedBit ::= identifier "(" number ")" -- number is nonnegative
END
MODULE-COMPLIANCE MACRO ::=
BEGIN
TYPE NOTATION ::=
"STATUS" Status
"DESCRIPTION" Text
ReferPart
ModulePart
VALUE NOTATION ::=
value(VALUE OBJECT IDENTIFIER)
Status ::=
"current"
| "deprecated"
| "obsolete"
ReferPart ::=
"REFERENCE" Text
| empty
ModulePart ::=
Modules
Modules ::=
Module
| Modules Module
Module ::=
-- name of module --
"MODULE" ModuleName
MandatoryPart
CompliancePart
ModuleName ::=
-- identifier must start with uppercase letter
identifier ModuleIdentifier
-- must not be empty unless contained
-- in MIB Module
| empty
ModuleIdentifier ::=
value(OBJECT IDENTIFIER)
| empty
MandatoryPart ::=
"MANDATORY-GROUPS" "{" Groups "}"
| empty
Groups ::=
Group
| Groups "," Group
Group ::=
value(OBJECT IDENTIFIER)
CompliancePart ::=
Compliances
| empty
Compliances ::=
Compliance
| Compliances Compliance
Compliance ::=
ComplianceGroup
| Object
ComplianceGroup ::=
"GROUP" value(OBJECT IDENTIFIER)
"DESCRIPTION" Text
Object ::=
"OBJECT" value(ObjectName)
SyntaxPart
WriteSyntaxPart
AccessPart
"DESCRIPTION" Text
-- must be a refinement for object's SYNTAX clause
SyntaxPart ::= "SYNTAX" Syntax
| empty
-- must be a refinement for object's SYNTAX clause
WriteSyntaxPart ::= "WRITE-SYNTAX" Syntax
| empty
Syntax ::= -- Must be one of the following:
-- a base type (or its refinement),
-- a textual convention (or its refinement), or
-- a BITS pseudo-type
type
| "BITS" "{" NamedBits "}"
NamedBits ::= NamedBit
| NamedBits "," NamedBit
NamedBit ::= identifier "(" number ")" -- number is nonnegative
AccessPart ::=
"MIN-ACCESS" Access
| empty
Access ::=
"not-accessible"
| "accessible-for-notify"
| "read-only"
| "read-write"
| "read-create"
-- a character string as defined in [2]
Text ::= value(IA5String)
END
OBJECT-GROUP MACRO ::=
BEGIN
TYPE NOTATION ::=
ObjectsPart
"STATUS" Status
"DESCRIPTION" Text
ReferPart
VALUE NOTATION ::=
value(VALUE OBJECT IDENTIFIER)
ObjectsPart ::=
"OBJECTS" "{" Objects "}"
Objects ::=
Object
| Objects "," Object
Object ::=
value(ObjectName)
Status ::=
"current"
| "deprecated"
| "obsolete"
ReferPart ::=
"REFERENCE" Text
| empty
-- a character string as defined in [2]
Text ::= value(IA5String)
END
InterfaceIndex ::= TEXTUAL-CONVENTION
DISPLAY-HINT "d"
STATUS current
DESCRIPTION
"A unique value, greater than zero, for each interface or
interface sub-layer in the managed system. It is
recommended that values are assigned contiguously starting
from 1. The value for each interface sub-layer must remain
constant at least from one re-initialization of the entity's
network management system to the next re-initialization."
SYNTAX Integer32 (1..2147483647)
MacAddress ::= TEXTUAL-CONVENTION
DISPLAY-HINT "1x:"
STATUS current
DESCRIPTION
"Represents an 802 MAC address represented in the
`canonical' order defined by IEEE 802.1a, i.e., as if it
were transmitted least significant bit first, even though
802.5 (in contrast to other 802.x protocols) requires MAC
addresses to be transmitted most significant bit first."
SYNTAX OCTET STRING (SIZE (6))
END

30
internal/snmp/testdata/gosmi/foo vendored Normal file
View file

@ -0,0 +1,30 @@
FOOTEST-MIB DEFINITIONS ::= BEGIN
IMPORTS
MODULE-IDENTITY, OBJECT-TYPE, Integer32 FROM fooImports;
fooTestMIB MODULE-IDENTITY
LAST-UPDATED "2021090800Z"
ORGANIZATION "influx"
CONTACT-INFO
"EMail: influx@email.com"
DESCRIPTION
"MIB module for testing snmp plugin
for telegraf
"
::= { iso 1 }
fooMIBObjects OBJECT IDENTIFIER ::= { iso 2 }
fooOne OBJECT IDENTIFIER ::= { iso 1 }
six OBJECT IDENTIFIER ::= { fooOne 1 }
three OBJECT IDENTIFIER ::= { six 3 }
foo OBJECT-TYPE
SYNTAX Integer32
ACCESS read-only
STATUS current
DESCRIPTION
"foo mib for testing"
::= { fooMIBObjects 3 }
END

169
internal/snmp/testdata/gosmi/fooImports vendored Normal file
View file

@ -0,0 +1,169 @@
fooImports DEFINITIONS ::= BEGIN
-- the path to the root
org OBJECT IDENTIFIER ::= { iso 1 } -- "iso" = 1
dod OBJECT IDENTIFIER ::= { org 2 }
internet OBJECT IDENTIFIER ::= { dod 3 }
ExtUTCTime ::= OCTET STRING(SIZE(11 | 13))
-- format is YYMMDDHHMMZ or YYYYMMDDHHMMZ
-- where: YY - last two digits of year (only years
-- between 1900-1999)
-- YYYY - last four digits of the year (any year)
-- MM - month (01 through 12)
-- DD - day of month (01 through 31)
-- HH - hours (00 through 23)
-- MM - minutes (00 through 59)
-- Z - denotes GMT (the ASCII character Z)
--
-- For example, "9502192015Z" and "199502192015Z" represent
-- 8:15pm GMT on 19 February 1995. Years after 1999 must use
-- the four digit year format. Years 1900-1999 may use the
-- two or four digit format.
-- definitions for information modules
MODULE-IDENTITY MACRO ::=
BEGIN
TYPE NOTATION ::=
"LAST-UPDATED" value(Update ExtUTCTime)
"ORGANIZATION" Text
"CONTACT-INFO" Text
"DESCRIPTION" Text
RevisionPart
VALUE NOTATION ::=
value(VALUE OBJECT IDENTIFIER)
RevisionPart ::=
Revisions
| empty
Revisions ::=
Revision
| Revisions Revision
Revision ::=
"REVISION" value(Update ExtUTCTime)
"DESCRIPTION" Text
-- a character string as defined in section 3.1.1
Text ::= value(IA5String)
END
OBJECT-IDENTITY MACRO ::=
BEGIN
TYPE NOTATION ::=
"STATUS" Status
"DESCRIPTION" Text
ReferPart
VALUE NOTATION ::=
value(VALUE OBJECT IDENTIFIER)
Status ::=
"current"
| "deprecated"
| "obsolete"
ReferPart ::=
"REFERENCE" Text
| empty
-- a character string as defined in section 3.1.1
Text ::= value(IA5String)
END
-- names of objects
-- (Note that these definitions of ObjectName and NotificationName
-- are not to be IMPORTed by MIB modules.)
ObjectName ::=
OBJECT IDENTIFIER
NotificationName ::=
OBJECT IDENTIFIER
-- indistinguishable from INTEGER, but never needs more than
-- 32-bits for a two's complement representation
Integer32 ::=
INTEGER (-2147483648..2147483647)
-- definition for objects
OBJECT-TYPE MACRO ::=
BEGIN
TYPE NOTATION ::=
UnitsPart
"MAX-ACCESS" Access
"STATUS" Status
"DESCRIPTION" Text
ReferPart
IndexPart
DefValPart
VALUE NOTATION ::=
value(VALUE ObjectName)
NamedBits ::= NamedBit
| NamedBits "," NamedBit
NamedBit ::= identifier "(" number ")" -- number is nonnegative
UnitsPart ::=
"UNITS" Text
| empty
Access ::=
"not-accessible"
| "accessible-for-notify"
| "read-only"
| "read-write"
| "read-create"
Status ::=
"current"
| "deprecated"
| "obsolete"
ReferPart ::=
"REFERENCE" Text
| empty
IndexPart ::=
"INDEX" "{" IndexTypes "}"
| "AUGMENTS" "{" Entry "}"
| empty
IndexTypes ::=
IndexType
| IndexTypes "," IndexType
IndexType ::=
"IMPLIED" Index
| Index
Entry ::=
-- use the INDEX value of the
-- correspondent OBJECT-TYPE invocation
value(ObjectName)
DefValPart ::= "DEFVAL" "{" Defvalue "}"
| empty
BitsValue ::= BitNames
| empty
BitNames ::= BitName
| BitNames "," BitName
BitName ::= identifier
-- a character string as defined in section 3.1.1
Text ::= value(IA5String)
END
END

View file

@ -0,0 +1,84 @@
IF-MIB DEFINITIONS ::= BEGIN
IMPORTS
MODULE-IDENTITY, OBJECT-TYPE, Integer32, mib-2,
PhysAddress FROM ifPhysAddressImports;
ifMIB MODULE-IDENTITY
LAST-UPDATED "200006140000Z"
ORGANIZATION "IETF Interfaces MIB Working Group"
CONTACT-INFO
" Keith McCloghrie
Cisco Systems, Inc.
170 West Tasman Drive
San Jose, CA 95134-1706
US
408-526-5260
kzm@cisco.com"
DESCRIPTION
"The MIB module to describe generic objects for network
interface sub-layers. This MIB is an updated version of
MIB-II's ifTable, and incorporates the extensions defined in
RFC 1229."
REVISION "200006140000Z"
DESCRIPTION
"Clarifications agreed upon by the Interfaces MIB WG, and
published as RFC 2863."
REVISION "199602282155Z"
DESCRIPTION
"Revisions made by the Interfaces MIB WG, and published in
RFC 2233."
REVISION "199311082155Z"
DESCRIPTION
"Initial revision, published as part of RFC 1573."
::= { mib-2 31 }
ifMIBObjects OBJECT IDENTIFIER ::= { ifMIB 1 }
interfaces OBJECT IDENTIFIER ::= { mib-2 2 }
ifTable OBJECT-TYPE
SYNTAX SEQUENCE OF IfEntry
MAX-ACCESS not-accessible
STATUS current
DESCRIPTION
"A list of interface entries. The number of entries is
given by the value of ifNumber."
::= { interfaces 2 }
ifEntry OBJECT-TYPE
SYNTAX IfEntry
MAX-ACCESS not-accessible
STATUS current
DESCRIPTION
"An entry containing management information applicable to a
particular interface."
INDEX { ifIndex }
::= { ifTable 1 }
ifPhysAddress OBJECT-TYPE
SYNTAX PhysAddress
ACCESS read-only
STATUS mandatory
DESCRIPTION
"The interface's address at the protocol layer
immediately `below' the network layer in the
protocol stack. For interfaces which do not have
such an address (e.g., a serial line), this object
should contain an octet string of zero length."
::= { ifEntry 6 }
foo OBJECT-TYPE
SYNTAX Integer32
ACCESS read-only
STATUS current
DESCRIPTION
"foo mib for testing"
::= { ifEntry 9 }
END

View file

@ -0,0 +1,254 @@
SNMPv2-SMI DEFINITIONS ::= BEGIN
-- the path to the root
org OBJECT IDENTIFIER ::= { iso 3 } -- "iso" = 1
dod OBJECT IDENTIFIER ::= { org 6 }
internet OBJECT IDENTIFIER ::= { dod 1 }
directory OBJECT IDENTIFIER ::= { internet 1 }
mgmt OBJECT IDENTIFIER ::= { internet 2 }
mib-2 OBJECT IDENTIFIER ::= { mgmt 1 }
transmission OBJECT IDENTIFIER ::= { mib-2 10 }
experimental OBJECT IDENTIFIER ::= { internet 3 }
private OBJECT IDENTIFIER ::= { internet 4 }
enterprises OBJECT IDENTIFIER ::= { private 1 }
security OBJECT IDENTIFIER ::= { internet 5 }
snmpV2 OBJECT IDENTIFIER ::= { internet 6 }
-- transport domains
snmpDomains OBJECT IDENTIFIER ::= { snmpV2 1 }
-- transport proxies
snmpProxys OBJECT IDENTIFIER ::= { snmpV2 2 }
-- module identities
snmpModules OBJECT IDENTIFIER ::= { snmpV2 3 }
-- Extended UTCTime, to allow dates with four-digit years
-- (Note that this definition of ExtUTCTime is not to be IMPORTed
-- by MIB modules.)
ExtUTCTime ::= OCTET STRING(SIZE(11 | 13))
-- format is YYMMDDHHMMZ or YYYYMMDDHHMMZ
-- where: YY - last two digits of year (only years
-- between 1900-1999)
-- YYYY - last four digits of the year (any year)
-- MM - month (01 through 12)
-- DD - day of month (01 through 31)
-- HH - hours (00 through 23)
-- MM - minutes (00 through 59)
-- Z - denotes GMT (the ASCII character Z)
--
-- For example, "9502192015Z" and "199502192015Z" represent
-- 8:15pm GMT on 19 February 1995. Years after 1999 must use
-- the four digit year format. Years 1900-1999 may use the
-- two or four digit format.
-- definitions for information modules
MODULE-IDENTITY MACRO ::=
BEGIN
TYPE NOTATION ::=
"LAST-UPDATED" value(Update ExtUTCTime)
"ORGANIZATION" Text
"CONTACT-INFO" Text
"DESCRIPTION" Text
RevisionPart
VALUE NOTATION ::=
value(VALUE OBJECT IDENTIFIER)
RevisionPart ::=
Revisions
| empty
Revisions ::=
Revision
| Revisions Revision
Revision ::=
"REVISION" value(Update ExtUTCTime)
"DESCRIPTION" Text
-- a character string as defined in section 3.1.1
Text ::= value(IA5String)
END
OBJECT-IDENTITY MACRO ::=
BEGIN
TYPE NOTATION ::=
"STATUS" Status
"DESCRIPTION" Text
ReferPart
VALUE NOTATION ::=
value(VALUE OBJECT IDENTIFIER)
Status ::=
"current"
| "deprecated"
| "obsolete"
ReferPart ::=
"REFERENCE" Text
| empty
-- a character string as defined in section 3.1.1
Text ::= value(IA5String)
END
-- names of objects
-- (Note that these definitions of ObjectName and NotificationName
-- are not to be IMPORTed by MIB modules.)
ObjectName ::=
OBJECT IDENTIFIER
NotificationName ::=
OBJECT IDENTIFIER
-- syntax of objects
-- the "base types" defined here are:
-- 3 built-in ASN.1 types: INTEGER, OCTET STRING, OBJECT IDENTIFIER
-- 8 application-defined types: Integer32, IpAddress, Counter32,
-- Gauge32, Unsigned32, TimeTicks, Opaque, and Counter64
ObjectSyntax ::=
CHOICE {
simple
SimpleSyntax,
-- note that SEQUENCEs for conceptual tables and
-- rows are not mentioned here...
application-wide
ApplicationSyntax
}
-- built-in ASN.1 types
SimpleSyntax ::=
CHOICE {
-- INTEGERs with a more restrictive range
-- may also be used
integer-value -- includes Integer32
INTEGER (-2147483648..2147483647),
-- OCTET STRINGs with a more restrictive size
-- may also be used
string-value
OCTET STRING (SIZE (0..65535)),
objectID-value
OBJECT IDENTIFIER
}
-- indistinguishable from INTEGER, but never needs more than
-- 32-bits for a two's complement representation
Integer32 ::=
INTEGER (-2147483648..2147483647)
-- definition for objects
OBJECT-TYPE MACRO ::=
BEGIN
TYPE NOTATION ::=
"SYNTAX" Syntax
UnitsPart
"MAX-ACCESS" Access
"STATUS" Status
"DESCRIPTION" Text
ReferPart
IndexPart
DefValPart
VALUE NOTATION ::=
value(VALUE ObjectName)
Syntax ::= -- Must be one of the following:
-- a base type (or its refinement),
-- a textual convention (or its refinement), or
-- a BITS pseudo-type
type
| "BITS" "{" NamedBits "}"
NamedBits ::= NamedBit
| NamedBits "," NamedBit
NamedBit ::= identifier "(" number ")" -- number is nonnegative
UnitsPart ::=
"UNITS" Text
| empty
Access ::=
"not-accessible"
| "accessible-for-notify"
| "read-only"
| "read-write"
| "read-create"
Status ::=
"current"
| "deprecated"
| "obsolete"
ReferPart ::=
"REFERENCE" Text
| empty
IndexPart ::=
"INDEX" "{" IndexTypes "}"
| "AUGMENTS" "{" Entry "}"
| empty
IndexTypes ::=
IndexType
| IndexTypes "," IndexType
IndexType ::=
"IMPLIED" Index
| Index
Index ::=
-- use the SYNTAX value of the
-- correspondent OBJECT-TYPE invocation
value(ObjectName)
Entry ::=
-- use the INDEX value of the
-- correspondent OBJECT-TYPE invocation
value(ObjectName)
DefValPart ::= "DEFVAL" "{" Defvalue "}"
| empty
Defvalue ::= -- must be valid for the type specified in
-- SYNTAX clause of same OBJECT-TYPE macro
value(ObjectSyntax)
| "{" BitsValue "}"
BitsValue ::= BitNames
| empty
BitNames ::= BitName
| BitNames "," BitName
BitName ::= identifier
-- a character string as defined in section 3.1.1
Text ::= value(IA5String)
END
PhysAddress ::= TEXTUAL-CONVENTION
DISPLAY-HINT "1x:"
STATUS current
DESCRIPTION
"Represents media- or physical-level addresses."
SYNTAX OCTET STRING
END

98
internal/snmp/testdata/gosmi/server vendored Normal file
View file

@ -0,0 +1,98 @@
TEST DEFINITIONS ::= BEGIN
IMPORTS
MODULE-IDENTITY, OBJECT-TYPE, Integer32 FROM fooImports;
TestMIB MODULE-IDENTITY
LAST-UPDATED "2021090800Z"
ORGANIZATION "influx"
CONTACT-INFO
"EMail: influx@email.com"
DESCRIPTION
"MIB module for testing snmp plugin
for telegraf
"
::= { iso 1 }
DateAndTime ::= TEXTUAL-CONVENTION
DISPLAY-HINT "2d-1d-1d,1d:1d:1d.1d,1a1d:1d"
STATUS current
DESCRIPTION
"A date-time specification.
field octets contents range
----- ------ -------- -----
1 1-2 year* 0..65536
2 3 month 1..12
3 4 day 1..31
4 5 hour 0..23
5 6 minutes 0..59
6 7 seconds 0..60
(use 60 for leap-second)
7 8 deci-seconds 0..9
8 9 direction from UTC '+' / '-'
9 10 hours from UTC* 0..13
10 11 minutes from UTC 0..59
* Notes:
- the value of year is in network-byte order
- daylight saving time in New Zealand is +13
For example, Tuesday May 26, 1992 at 1:30:15 PM EDT would be
displayed as:
1992-5-26,13:30:15.0,-4:0
Note that if only local time is known, then timezone
information (fields 8-10) is not present."
SYNTAX OCTET STRING (SIZE (8 | 11))
testingObjects OBJECT IDENTIFIER ::= { iso 0 }
testObjects OBJECT IDENTIFIER ::= { testingObjects 0 }
hostnameone OBJECT IDENTIFIER ::= {testObjects 1 }
hostname OBJECT IDENTIFIER ::= { hostnameone 1 }
testTable OBJECT IDENTIFIER ::= { testObjects 0 }
testMIBObjects OBJECT IDENTIFIER ::= { testTable 1 }
server OBJECT-TYPE
SYNTAX Integer32
ACCESS read-only
STATUS current
DESCRIPTION
"server mib for testing"
::= { testMIBObjects 1 }
connections OBJECT-TYPE
SYNTAX Integer32
ACCESS read-only
STATUS current
DESCRIPTION
"server mib for testing"
::= { testMIBObjects 2 }
latency OBJECT-TYPE
SYNTAX Integer32
ACCESS read-only
STATUS current
DESCRIPTION
"server mib for testing"
::= { testMIBObjects 3 }
description OBJECT-TYPE
SYNTAX Integer32
ACCESS read-only
STATUS current
DESCRIPTION
"server mib for testing"
::= { testMIBObjects 4 }
dateAndTime OBJECT-TYPE
SYNTAX DateAndTime
ACCESS read-only
STATUS current
DESCRIPTION
"A date-time specification."
::= { testMIBObjects 5 }
END

View file

@ -0,0 +1,174 @@
fooImports DEFINITIONS ::= BEGIN
-- the path to the root
org OBJECT IDENTIFIER ::= { iso 1 } -- "iso" = 1
dod OBJECT IDENTIFIER ::= { org 1 }
internet OBJECT IDENTIFIER ::= { dod 1 }
directory OBJECT IDENTIFIER ::= { internet 1 }
mgmt OBJECT IDENTIFIER ::= { internet 1 }
mib-2 OBJECT IDENTIFIER ::= { mgmt 1 }
ExtUTCTime ::= OCTET STRING(SIZE(11 | 13))
-- format is YYMMDDHHMMZ or YYYYMMDDHHMMZ
-- where: YY - last two digits of year (only years
-- between 1900-1999)
-- YYYY - last four digits of the year (any year)
-- MM - month (01 through 12)
-- DD - day of month (01 through 31)
-- HH - hours (00 through 23)
-- MM - minutes (00 through 59)
-- Z - denotes GMT (the ASCII character Z)
--
-- For example, "9502192015Z" and "199502192015Z" represent
-- 8:15pm GMT on 19 February 1995. Years after 1999 must use
-- the four digit year format. Years 1900-1999 may use the
-- two or four digit format.
-- definitions for information modules
MODULE-IDENTITY MACRO ::=
BEGIN
TYPE NOTATION ::=
"LAST-UPDATED" value(Update ExtUTCTime)
"ORGANIZATION" Text
"CONTACT-INFO" Text
"DESCRIPTION" Text
RevisionPart
VALUE NOTATION ::=
value(VALUE OBJECT IDENTIFIER)
RevisionPart ::=
Revisions
| empty
Revisions ::=
Revision
| Revisions Revision
Revision ::=
"REVISION" value(Update ExtUTCTime)
"DESCRIPTION" Text
-- a character string as defined in section 3.1.1
Text ::= value(IA5String)
END
OBJECT-IDENTITY MACRO ::=
BEGIN
TYPE NOTATION ::=
"STATUS" Status
"DESCRIPTION" Text
ReferPart
VALUE NOTATION ::=
value(VALUE OBJECT IDENTIFIER)
Status ::=
"current"
| "deprecated"
| "obsolete"
ReferPart ::=
"REFERENCE" Text
| empty
-- a character string as defined in section 3.1.1
Text ::= value(IA5String)
END
-- names of objects
-- (Note that these definitions of ObjectName and NotificationName
-- are not to be IMPORTed by MIB modules.)
ObjectName ::=
OBJECT IDENTIFIER
NotificationName ::=
OBJECT IDENTIFIER
-- indistinguishable from INTEGER, but never needs more than
-- 32-bits for a two's complement representation
Integer32 ::=
INTEGER (-2147483648..2147483647)
-- definition for objects
OBJECT-TYPE MACRO ::=
BEGIN
TYPE NOTATION ::=
UnitsPart
"MAX-ACCESS" Access
"STATUS" Status
"DESCRIPTION" Text
ReferPart
IndexPart
DefValPart
VALUE NOTATION ::=
value(VALUE ObjectName)
NamedBits ::= NamedBit
| NamedBits "," NamedBit
NamedBit ::= identifier "(" number ")" -- number is nonnegative
UnitsPart ::=
"UNITS" Text
| empty
Access ::=
"not-accessible"
| "accessible-for-notify"
| "read-only"
| "read-write"
| "read-create"
Status ::=
"current"
| "deprecated"
| "obsolete"
ReferPart ::=
"REFERENCE" Text
| empty
IndexPart ::=
"INDEX" "{" IndexTypes "}"
| "AUGMENTS" "{" Entry "}"
| empty
IndexTypes ::=
IndexType
| IndexTypes "," IndexType
IndexType ::=
"IMPLIED" Index
| Index
Entry ::=
-- use the INDEX value of the
-- correspondent OBJECT-TYPE invocation
value(ObjectName)
DefValPart ::= "DEFVAL" "{" Defvalue "}"
| empty
BitsValue ::= BitNames
| empty
BitNames ::= BitName
| BitNames "," BitName
BitName ::= identifier
-- a character string as defined in section 3.1.1
Text ::= value(IA5String)
END
END

57
internal/snmp/testdata/gosmi/tableBuild vendored Normal file
View file

@ -0,0 +1,57 @@
TEST DEFINITIONS ::= BEGIN
IMPORTS
MODULE-IDENTITY, OBJECT-TYPE, Integer32 FROM fooImports;
TestMIB MODULE-IDENTITY
LAST-UPDATED "2021090800Z"
ORGANIZATION "influx"
CONTACT-INFO
"EMail: influx@email.com"
DESCRIPTION
"MIB module for testing snmp plugin
for telegraf
"
::= { iso 1 }
testingObjects OBJECT IDENTIFIER ::= { iso 0 }
testObjects OBJECT IDENTIFIER ::= { testingObjects 0 }
hostnameone OBJECT IDENTIFIER ::= {testObjects 1 }
hostname OBJECT IDENTIFIER ::= { hostnameone 1 }
testTable OBJECT IDENTIFIER ::= { testObjects 0 }
testMIBObjects OBJECT IDENTIFIER ::= { testTable 1 }
myfield1 OBJECT-TYPE
SYNTAX Integer32
ACCESS read-only
STATUS current
DESCRIPTION
"server mib for testing"
::= { testMIBObjects 1 }
myfield2 OBJECT-TYPE
SYNTAX Integer32
ACCESS read-only
STATUS current
DESCRIPTION
"server mib for testing"
::= { testMIBObjects 2 }
myfield3 OBJECT-TYPE
SYNTAX Integer32
ACCESS read-only
STATUS current
DESCRIPTION
"server mib for testing"
::= { testMIBObjects 3 }
myfield4 OBJECT-TYPE
SYNTAX Integer32
ACCESS read-only
STATUS current
DESCRIPTION
"server mib for testing"
::= { testMIBObjects 4 }
END

2613
internal/snmp/testdata/gosmi/tableMib vendored Normal file

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,119 @@
RFC1155-SMI DEFINITIONS ::= BEGIN
EXPORTS -- EVERYTHING
internet, directory, mgmt,
experimental, private, enterprises,
OBJECT-TYPE, ObjectName, ObjectSyntax, SimpleSyntax,
ApplicationSyntax, NetworkAddress, IpAddress,
Counter, Gauge, TimeTicks, Opaque;
-- the path to the root
internet OBJECT IDENTIFIER ::= { iso org(3) dod(6) 1 }
directory OBJECT IDENTIFIER ::= { internet 1 }
mgmt OBJECT IDENTIFIER ::= { internet 2 }
experimental OBJECT IDENTIFIER ::= { internet 3 }
private OBJECT IDENTIFIER ::= { internet 4 }
enterprises OBJECT IDENTIFIER ::= { private 1 }
-- definition of object types
OBJECT-TYPE MACRO ::=
BEGIN
TYPE NOTATION ::= "SYNTAX" type (TYPE ObjectSyntax)
"ACCESS" Access
"STATUS" Status
VALUE NOTATION ::= value (VALUE ObjectName)
Access ::= "read-only"
| "read-write"
| "write-only"
| "not-accessible"
Status ::= "mandatory"
| "optional"
| "obsolete"
END
-- names of objects in the MIB
ObjectName ::=
OBJECT IDENTIFIER
-- syntax of objects in the MIB
ObjectSyntax ::=
CHOICE {
simple
SimpleSyntax,
-- note that simple SEQUENCEs are not directly
-- mentioned here to keep things simple (i.e.,
-- prevent mis-use). However, application-wide
-- types which are IMPLICITly encoded simple
-- SEQUENCEs may appear in the following CHOICE
application-wide
ApplicationSyntax
}
SimpleSyntax ::=
CHOICE {
number
INTEGER,
string
OCTET STRING,
object
OBJECT IDENTIFIER,
empty
NULL
}
ApplicationSyntax ::=
CHOICE {
address
NetworkAddress,
counter
Counter,
gauge
Gauge,
ticks
TimeTicks,
arbitrary
Opaque
-- other application-wide types, as they are
-- defined, will be added here
}
-- application-wide types
NetworkAddress ::=
CHOICE {
internet
IpAddress
}
IpAddress ::=
[APPLICATION 0] -- in network-byte order
IMPLICIT OCTET STRING (SIZE (4))
Counter ::=
[APPLICATION 1]
IMPLICIT INTEGER (0..4294967295)
Gauge ::=
[APPLICATION 2]
IMPLICIT INTEGER (0..4294967295)
TimeTicks ::=
[APPLICATION 3]
IMPLICIT INTEGER (0..4294967295)
Opaque ::=
[APPLICATION 4] -- arbitrary ASN.1 value,
IMPLICIT OCTET STRING -- "double-wrapped"
END

786
internal/snmp/testdata/gosmi/tcpMib vendored Normal file
View file

@ -0,0 +1,786 @@
TCP-MIB DEFINITIONS ::= BEGIN
IMPORTS
MODULE-IDENTITY, OBJECT-TYPE, Integer32, Unsigned32,
Gauge32, Counter32, Counter64, IpAddress, mib-2,
MODULE-COMPLIANCE, OBJECT-GROUP, InetAddress,
InetAddressType, InetPortNumber
FROM tcpMibImports;
tcpMIB MODULE-IDENTITY
LAST-UPDATED "200502180000Z" -- 18 February 2005
ORGANIZATION
"IETF IPv6 MIB Revision Team
http://www.ietf.org/html.charters/ipv6-charter.html"
CONTACT-INFO
"Rajiv Raghunarayan (editor)
Cisco Systems Inc.
170 West Tasman Drive
San Jose, CA 95134
Phone: +1 408 853 9612
Email: <raraghun@cisco.com>
Send comments to <ipv6@ietf.org>"
DESCRIPTION
"The MIB module for managing TCP implementations.
Copyright (C) The Internet Society (2005). This version
of this MIB module is a part of RFC 4022; see the RFC
itself for full legal notices."
REVISION "200502180000Z" -- 18 February 2005
DESCRIPTION
"IP version neutral revision, published as RFC 4022."
REVISION "9411010000Z"
DESCRIPTION
"Initial SMIv2 version, published as RFC 2012."
REVISION "9103310000Z"
DESCRIPTION
"The initial revision of this MIB module was part of
MIB-II."
::= { mib-2 49 }
-- the TCP base variables group
tcp OBJECT IDENTIFIER ::= { mib-2 6 }
-- Scalars
tcpRtoAlgorithm OBJECT-TYPE
SYNTAX INTEGER {
other(1), -- none of the following
constant(2), -- a constant rto
rsre(3), -- MIL-STD-1778, Appendix B
vanj(4), -- Van Jacobson's algorithm
rfc2988(5) -- RFC 2988
}
MAX-ACCESS read-only
STATUS current
DESCRIPTION
"The algorithm used to determine the timeout value used for
retransmitting unacknowledged octets."
::= { tcp 1 }
tcpRtoMin OBJECT-TYPE
SYNTAX Integer32 (0..2147483647)
UNITS "milliseconds"
MAX-ACCESS read-only
STATUS current
DESCRIPTION
"The minimum value permitted by a TCP implementation for
the retransmission timeout, measured in milliseconds.
More refined semantics for objects of this type depend
on the algorithm used to determine the retransmission
timeout; in particular, the IETF standard algorithm
rfc2988(5) provides a minimum value."
::= { tcp 2 }
tcpRtoMax OBJECT-TYPE
SYNTAX Integer32 (0..2147483647)
UNITS "milliseconds"
MAX-ACCESS read-only
STATUS current
DESCRIPTION
"The maximum value permitted by a TCP implementation for
the retransmission timeout, measured in milliseconds.
More refined semantics for objects of this type depend
on the algorithm used to determine the retransmission
timeout; in particular, the IETF standard algorithm
rfc2988(5) provides an upper bound (as part of an
adaptive backoff algorithm)."
::= { tcp 3 }
tcpMaxConn OBJECT-TYPE
SYNTAX Integer32 (-1 | 0..2147483647)
MAX-ACCESS read-only
STATUS current
DESCRIPTION
"The limit on the total number of TCP connections the entity
can support. In entities where the maximum number of
connections is dynamic, this object should contain the
value -1."
::= { tcp 4 }
tcpActiveOpens OBJECT-TYPE
SYNTAX Counter32
MAX-ACCESS read-only
STATUS current
DESCRIPTION
"The number of times that TCP connections have made a direct
transition to the SYN-SENT state from the CLOSED state.
Discontinuities in the value of this counter are
indicated via discontinuities in the value of sysUpTime."
::= { tcp 5 }
tcpPassiveOpens OBJECT-TYPE
SYNTAX Counter32
MAX-ACCESS read-only
STATUS current
DESCRIPTION
"The number of times TCP connections have made a direct
transition to the SYN-RCVD state from the LISTEN state.
Discontinuities in the value of this counter are
indicated via discontinuities in the value of sysUpTime."
::= { tcp 6 }
tcpAttemptFails OBJECT-TYPE
SYNTAX Counter32
MAX-ACCESS read-only
STATUS current
DESCRIPTION
"The number of times that TCP connections have made a direct
transition to the CLOSED state from either the SYN-SENT
state or the SYN-RCVD state, plus the number of times that
TCP connections have made a direct transition to the
LISTEN state from the SYN-RCVD state.
Discontinuities in the value of this counter are
indicated via discontinuities in the value of sysUpTime."
::= { tcp 7 }
tcpEstabResets OBJECT-TYPE
SYNTAX Counter32
MAX-ACCESS read-only
STATUS current
DESCRIPTION
"The number of times that TCP connections have made a direct
transition to the CLOSED state from either the ESTABLISHED
state or the CLOSE-WAIT state.
Discontinuities in the value of this counter are
indicated via discontinuities in the value of sysUpTime."
::= { tcp 8 }
tcpCurrEstab OBJECT-TYPE
SYNTAX Gauge32
MAX-ACCESS read-only
STATUS current
DESCRIPTION
"The number of TCP connections for which the current state
is either ESTABLISHED or CLOSE-WAIT."
::= { tcp 9 }
tcpInSegs OBJECT-TYPE
SYNTAX Counter32
MAX-ACCESS read-only
STATUS current
DESCRIPTION
"The total number of segments received, including those
received in error. This count includes segments received
on currently established connections.
Discontinuities in the value of this counter are
indicated via discontinuities in the value of sysUpTime."
::= { tcp 10 }
tcpOutSegs OBJECT-TYPE
SYNTAX Counter32
MAX-ACCESS read-only
STATUS current
DESCRIPTION
"The total number of segments sent, including those on
current connections but excluding those containing only
retransmitted octets.
Discontinuities in the value of this counter are
indicated via discontinuities in the value of sysUpTime."
::= { tcp 11 }
tcpRetransSegs OBJECT-TYPE
SYNTAX Counter32
MAX-ACCESS read-only
STATUS current
DESCRIPTION
"The total number of segments retransmitted; that is, the
number of TCP segments transmitted containing one or more
previously transmitted octets.
Discontinuities in the value of this counter are
indicated via discontinuities in the value of sysUpTime."
::= { tcp 12 }
tcpInErrs OBJECT-TYPE
SYNTAX Counter32
MAX-ACCESS read-only
STATUS current
DESCRIPTION
"The total number of segments received in error (e.g., bad
TCP checksums).
Discontinuities in the value of this counter are
indicated via discontinuities in the value of sysUpTime."
::= { tcp 14 }
tcpOutRsts OBJECT-TYPE
SYNTAX Counter32
MAX-ACCESS read-only
STATUS current
DESCRIPTION
"The number of TCP segments sent containing the RST flag.
Discontinuities in the value of this counter are
indicated via discontinuities in the value of sysUpTime."
::= { tcp 15 }
-- { tcp 16 } was used to represent the ipv6TcpConnTable in RFC 2452,
-- which has since been obsoleted. It MUST not be used.
tcpHCInSegs OBJECT-TYPE
SYNTAX Counter64
MAX-ACCESS read-only
STATUS current
DESCRIPTION
"The total number of segments received, including those
received in error. This count includes segments received
on currently established connections. This object is
the 64-bit equivalent of tcpInSegs.
Discontinuities in the value of this counter are
indicated via discontinuities in the value of sysUpTime."
::= { tcp 17 }
tcpHCOutSegs OBJECT-TYPE
SYNTAX Counter64
MAX-ACCESS read-only
STATUS current
DESCRIPTION
"The total number of segments sent, including those on
current connections but excluding those containing only
retransmitted octets. This object is the 64-bit
equivalent of tcpOutSegs.
Discontinuities in the value of this counter are
indicated via discontinuities in the value of sysUpTime."
::= { tcp 18 }
-- The TCP Connection table
tcpConnectionTable OBJECT-TYPE
SYNTAX SEQUENCE OF TcpConnectionEntry
MAX-ACCESS not-accessible
STATUS current
DESCRIPTION
"A table containing information about existing TCP
connections. Note that unlike earlier TCP MIBs, there
is a separate table for connections in the LISTEN state."
::= { tcp 19 }
tcpConnectionEntry OBJECT-TYPE
SYNTAX TcpConnectionEntry
MAX-ACCESS not-accessible
STATUS current
DESCRIPTION
"A conceptual row of the tcpConnectionTable containing
information about a particular current TCP connection.
Each row of this table is transient in that it ceases to
exist when (or soon after) the connection makes the
transition to the CLOSED state."
INDEX { tcpConnectionLocalAddressType,
tcpConnectionLocalAddress,
tcpConnectionLocalPort,
tcpConnectionRemAddressType,
tcpConnectionRemAddress,
tcpConnectionRemPort }
::= { tcpConnectionTable 1 }
TcpConnectionEntry ::= SEQUENCE {
tcpConnectionLocalAddressType InetAddressType,
tcpConnectionLocalAddress InetAddress,
tcpConnectionLocalPort InetPortNumber,
tcpConnectionRemAddressType InetAddressType,
tcpConnectionRemAddress InetAddress,
tcpConnectionRemPort InetPortNumber,
tcpConnectionState INTEGER,
tcpConnectionProcess Unsigned32
}
tcpConnectionLocalAddressType OBJECT-TYPE
SYNTAX InetAddressType
MAX-ACCESS not-accessible
STATUS current
DESCRIPTION
"The address type of tcpConnectionLocalAddress."
::= { tcpConnectionEntry 1 }
tcpConnectionLocalAddress OBJECT-TYPE
SYNTAX InetAddress
MAX-ACCESS not-accessible
STATUS current
DESCRIPTION
"The local IP address for this TCP connection. The type
of this address is determined by the value of
tcpConnectionLocalAddressType.
As this object is used in the index for the
tcpConnectionTable, implementors should be
careful not to create entries that would result in OIDs
with more than 128 subidentifiers; otherwise the information
cannot be accessed by using SNMPv1, SNMPv2c, or SNMPv3."
::= { tcpConnectionEntry 2 }
tcpConnectionLocalPort OBJECT-TYPE
SYNTAX InetPortNumber
MAX-ACCESS not-accessible
STATUS current
DESCRIPTION
"The local port number for this TCP connection."
::= { tcpConnectionEntry 3 }
tcpConnectionRemAddressType OBJECT-TYPE
SYNTAX InetAddressType
MAX-ACCESS not-accessible
STATUS current
DESCRIPTION
"The address type of tcpConnectionRemAddress."
::= { tcpConnectionEntry 4 }
tcpConnectionRemAddress OBJECT-TYPE
SYNTAX InetAddress
MAX-ACCESS not-accessible
STATUS current
DESCRIPTION
"The remote IP address for this TCP connection. The type
of this address is determined by the value of
tcpConnectionRemAddressType.
As this object is used in the index for the
tcpConnectionTable, implementors should be
careful not to create entries that would result in OIDs
with more than 128 subidentifiers; otherwise the information
cannot be accessed by using SNMPv1, SNMPv2c, or SNMPv3."
::= { tcpConnectionEntry 5 }
tcpConnectionRemPort OBJECT-TYPE
SYNTAX InetPortNumber
MAX-ACCESS not-accessible
STATUS current
DESCRIPTION
"The remote port number for this TCP connection."
::= { tcpConnectionEntry 6 }
tcpConnectionState OBJECT-TYPE
SYNTAX INTEGER {
closed(1),
listen(2),
synSent(3),
synReceived(4),
established(5),
finWait1(6),
finWait2(7),
closeWait(8),
lastAck(9),
closing(10),
timeWait(11),
deleteTCB(12)
}
MAX-ACCESS read-write
STATUS current
DESCRIPTION
"The state of this TCP connection.
The value listen(2) is included only for parallelism to the
old tcpConnTable and should not be used. A connection in
LISTEN state should be present in the tcpListenerTable.
The only value that may be set by a management station is
deleteTCB(12). Accordingly, it is appropriate for an agent
to return a `badValue' response if a management station
attempts to set this object to any other value.
If a management station sets this object to the value
deleteTCB(12), then the TCB (as defined in [RFC793]) of
the corresponding connection on the managed node is
deleted, resulting in immediate termination of the
connection.
As an implementation-specific option, a RST segment may be
sent from the managed node to the other TCP endpoint (note,
however, that RST segments are not sent reliably)."
::= { tcpConnectionEntry 7 }
tcpConnectionProcess OBJECT-TYPE
SYNTAX Unsigned32
MAX-ACCESS read-only
STATUS current
DESCRIPTION
"The system's process ID for the process associated with
this connection, or zero if there is no such process. This
value is expected to be the same as HOST-RESOURCES-MIB::
hrSWRunIndex or SYSAPPL-MIB::sysApplElmtRunIndex for some
row in the appropriate tables."
::= { tcpConnectionEntry 8 }
-- The TCP Listener table
tcpListenerTable OBJECT-TYPE
SYNTAX SEQUENCE OF TcpListenerEntry
MAX-ACCESS not-accessible
STATUS current
DESCRIPTION
"A table containing information about TCP listeners. A
listening application can be represented in three
possible ways:
1. An application that is willing to accept both IPv4 and
IPv6 datagrams is represented by
a tcpListenerLocalAddressType of unknown (0) and
a tcpListenerLocalAddress of ''h (a zero-length
octet-string).
2. An application that is willing to accept only IPv4 or
IPv6 datagrams is represented by a
tcpListenerLocalAddressType of the appropriate address
type and a tcpListenerLocalAddress of '0.0.0.0' or '::'
respectively.
3. An application that is listening for data destined
only to a specific IP address, but from any remote
system, is represented by a tcpListenerLocalAddressType
of an appropriate address type, with
tcpListenerLocalAddress as the specific local address.
NOTE: The address type in this table represents the
address type used for the communication, irrespective
of the higher-layer abstraction. For example, an
application using IPv6 'sockets' to communicate via
IPv4 between ::ffff:10.0.0.1 and ::ffff:10.0.0.2 would
use InetAddressType ipv4(1))."
::= { tcp 20 }
tcpListenerEntry OBJECT-TYPE
SYNTAX TcpListenerEntry
MAX-ACCESS not-accessible
STATUS current
DESCRIPTION
"A conceptual row of the tcpListenerTable containing
information about a particular TCP listener."
INDEX { tcpListenerLocalAddressType,
tcpListenerLocalAddress,
tcpListenerLocalPort }
::= { tcpListenerTable 1 }
TcpListenerEntry ::= SEQUENCE {
tcpListenerLocalAddressType InetAddressType,
tcpListenerLocalAddress InetAddress,
tcpListenerLocalPort InetPortNumber,
tcpListenerProcess Unsigned32
}
tcpListenerLocalAddressType OBJECT-TYPE
SYNTAX InetAddressType
MAX-ACCESS not-accessible
STATUS current
DESCRIPTION
"The address type of tcpListenerLocalAddress. The value
should be unknown (0) if connection initiations to all
local IP addresses are accepted."
::= { tcpListenerEntry 1 }
tcpListenerLocalAddress OBJECT-TYPE
SYNTAX InetAddress
MAX-ACCESS not-accessible
STATUS current
DESCRIPTION
"The local IP address for this TCP connection.
The value of this object can be represented in three
possible ways, depending on the characteristics of the
listening application:
1. For an application willing to accept both IPv4 and
IPv6 datagrams, the value of this object must be
''h (a zero-length octet-string), with the value
of the corresponding tcpListenerLocalAddressType
object being unknown (0).
2. For an application willing to accept only IPv4 or
IPv6 datagrams, the value of this object must be
'0.0.0.0' or '::' respectively, with
tcpListenerLocalAddressType representing the
appropriate address type.
3. For an application which is listening for data
destined only to a specific IP address, the value
of this object is the specific local address, with
tcpListenerLocalAddressType representing the
appropriate address type.
As this object is used in the index for the
tcpListenerTable, implementors should be
careful not to create entries that would result in OIDs
with more than 128 subidentifiers; otherwise the information
cannot be accessed, using SNMPv1, SNMPv2c, or SNMPv3."
::= { tcpListenerEntry 2 }
tcpListenerLocalPort OBJECT-TYPE
SYNTAX InetPortNumber
MAX-ACCESS not-accessible
STATUS current
DESCRIPTION
"The local port number for this TCP connection."
::= { tcpListenerEntry 3 }
tcpListenerProcess OBJECT-TYPE
SYNTAX Unsigned32
MAX-ACCESS read-only
STATUS current
DESCRIPTION
"The system's process ID for the process associated with
this listener, or zero if there is no such process. This
value is expected to be the same as HOST-RESOURCES-MIB::
hrSWRunIndex or SYSAPPL-MIB::sysApplElmtRunIndex for some
row in the appropriate tables."
::= { tcpListenerEntry 4 }
-- The deprecated TCP Connection table
tcpConnTable OBJECT-TYPE
SYNTAX SEQUENCE OF TcpConnEntry
MAX-ACCESS not-accessible
STATUS deprecated
DESCRIPTION
"A table containing information about existing IPv4-specific
TCP connections or listeners. This table has been
deprecated in favor of the version neutral
tcpConnectionTable."
::= { tcp 13 }
tcpConnEntry OBJECT-TYPE
SYNTAX TcpConnEntry
MAX-ACCESS not-accessible
STATUS deprecated
DESCRIPTION
"A conceptual row of the tcpConnTable containing information
about a particular current IPv4 TCP connection. Each row
of this table is transient in that it ceases to exist when
(or soon after) the connection makes the transition to the
CLOSED state."
INDEX { tcpConnLocalAddress,
tcpConnLocalPort,
tcpConnRemAddress,
tcpConnRemPort }
::= { tcpConnTable 1 }
TcpConnEntry ::= SEQUENCE {
tcpConnState INTEGER,
tcpConnLocalAddress IpAddress,
tcpConnLocalPort Integer32,
tcpConnRemAddress IpAddress,
tcpConnRemPort Integer32
}
tcpConnState OBJECT-TYPE
SYNTAX INTEGER {
closed(1),
listen(2),
synSent(3),
synReceived(4),
established(5),
finWait1(6),
finWait2(7),
closeWait(8),
lastAck(9),
closing(10),
timeWait(11),
deleteTCB(12)
}
MAX-ACCESS read-write
STATUS deprecated
DESCRIPTION
"The state of this TCP connection.
The only value that may be set by a management station is
deleteTCB(12). Accordingly, it is appropriate for an agent
to return a `badValue' response if a management station
attempts to set this object to any other value.
If a management station sets this object to the value
deleteTCB(12), then the TCB (as defined in [RFC793]) of
the corresponding connection on the managed node is
deleted, resulting in immediate termination of the
connection.
As an implementation-specific option, a RST segment may be
sent from the managed node to the other TCP endpoint (note,
however, that RST segments are not sent reliably)."
::= { tcpConnEntry 1 }
tcpConnLocalAddress OBJECT-TYPE
SYNTAX IpAddress
MAX-ACCESS read-only
STATUS deprecated
DESCRIPTION
"The local IP address for this TCP connection. In the case
of a connection in the listen state willing to
accept connections for any IP interface associated with the
node, the value 0.0.0.0 is used."
::= { tcpConnEntry 2 }
tcpConnLocalPort OBJECT-TYPE
SYNTAX Integer32 (0..65535)
MAX-ACCESS read-only
STATUS deprecated
DESCRIPTION
"The local port number for this TCP connection."
::= { tcpConnEntry 3 }
tcpConnRemAddress OBJECT-TYPE
SYNTAX IpAddress
MAX-ACCESS read-only
STATUS deprecated
DESCRIPTION
"The remote IP address for this TCP connection."
::= { tcpConnEntry 4 }
tcpConnRemPort OBJECT-TYPE
SYNTAX Integer32 (0..65535)
MAX-ACCESS read-only
STATUS deprecated
DESCRIPTION
"The remote port number for this TCP connection."
::= { tcpConnEntry 5 }
-- conformance information
tcpMIBConformance OBJECT IDENTIFIER ::= { tcpMIB 2 }
tcpMIBCompliances OBJECT IDENTIFIER ::= { tcpMIBConformance 1 }
tcpMIBGroups OBJECT IDENTIFIER ::= { tcpMIBConformance 2 }
-- compliance statements
tcpMIBCompliance2 MODULE-COMPLIANCE
STATUS current
DESCRIPTION
"The compliance statement for systems that implement TCP.
A number of INDEX objects cannot be
represented in the form of OBJECT clauses in SMIv2 but
have the following compliance requirements,
expressed in OBJECT clause form in this description
clause:
-- OBJECT tcpConnectionLocalAddressType
-- SYNTAX InetAddressType { ipv4(1), ipv6(2) }
-- DESCRIPTION
-- This MIB requires support for only global IPv4
-- and IPv6 address types.
--
-- OBJECT tcpConnectionRemAddressType
-- SYNTAX InetAddressType { ipv4(1), ipv6(2) }
-- DESCRIPTION
-- This MIB requires support for only global IPv4
-- and IPv6 address types.
--
-- OBJECT tcpListenerLocalAddressType
-- SYNTAX InetAddressType { unknown(0), ipv4(1),
-- ipv6(2) }
-- DESCRIPTION
-- This MIB requires support for only global IPv4
-- and IPv6 address types. The type unknown also
-- needs to be supported to identify a special
-- case in the listener table: a listen using
-- both IPv4 and IPv6 addresses on the device.
--
"
MODULE -- this module
MANDATORY-GROUPS { tcpBaseGroup, tcpConnectionGroup,
tcpListenerGroup }
GROUP tcpHCGroup
DESCRIPTION
"This group is mandatory for systems that are capable
of receiving or transmitting more than 1 million TCP
segments per second. 1 million segments per second will
cause a Counter32 to wrap in just over an hour."
OBJECT tcpConnectionState
SYNTAX INTEGER { closed(1), listen(2), synSent(3),
synReceived(4), established(5),
finWait1(6), finWait2(7), closeWait(8),
lastAck(9), closing(10), timeWait(11) }
MIN-ACCESS read-only
DESCRIPTION
"Write access is not required, nor is support for the value
deleteTCB (12)."
::= { tcpMIBCompliances 2 }
tcpMIBCompliance MODULE-COMPLIANCE
STATUS deprecated
DESCRIPTION
"The compliance statement for IPv4-only systems that
implement TCP. In order to be IP version independent, this
compliance statement is deprecated in favor of
tcpMIBCompliance2. However, agents are still encouraged
to implement these objects in order to interoperate with
the deployed base of managers."
MODULE -- this module
MANDATORY-GROUPS { tcpGroup }
OBJECT tcpConnState
MIN-ACCESS read-only
DESCRIPTION
"Write access is not required."
::= { tcpMIBCompliances 1 }
-- units of conformance
tcpGroup OBJECT-GROUP
OBJECTS { tcpRtoAlgorithm, tcpRtoMin, tcpRtoMax,
tcpMaxConn, tcpActiveOpens,
tcpPassiveOpens, tcpAttemptFails,
tcpEstabResets, tcpCurrEstab, tcpInSegs,
tcpOutSegs, tcpRetransSegs, tcpConnState,
tcpConnLocalAddress, tcpConnLocalPort,
tcpConnRemAddress, tcpConnRemPort,
tcpInErrs, tcpOutRsts }
STATUS deprecated
DESCRIPTION
"The tcp group of objects providing for management of TCP
entities."
::= { tcpMIBGroups 1 }
tcpBaseGroup OBJECT-GROUP
OBJECTS { tcpRtoAlgorithm, tcpRtoMin, tcpRtoMax,
tcpMaxConn, tcpActiveOpens,
tcpPassiveOpens, tcpAttemptFails,
tcpEstabResets, tcpCurrEstab, tcpInSegs,
tcpOutSegs, tcpRetransSegs,
tcpInErrs, tcpOutRsts }
STATUS current
DESCRIPTION
"The group of counters common to TCP entities."
::= { tcpMIBGroups 2 }
tcpConnectionGroup OBJECT-GROUP
OBJECTS { tcpConnectionState, tcpConnectionProcess }
STATUS current
DESCRIPTION
"The group provides general information about TCP
connections."
::= { tcpMIBGroups 3 }
tcpListenerGroup OBJECT-GROUP
OBJECTS { tcpListenerProcess }
STATUS current
DESCRIPTION
"This group has objects providing general information about
TCP listeners."
::= { tcpMIBGroups 4 }
tcpHCGroup OBJECT-GROUP
OBJECTS { tcpHCInSegs, tcpHCOutSegs }
STATUS current
DESCRIPTION
"The group of objects providing for counters of high speed
TCP implementations."
::= { tcpMIBGroups 5 }
END

View file

@ -0,0 +1,639 @@
SNMPv2-SMI DEFINITIONS ::= BEGIN
-- the path to the root
org OBJECT IDENTIFIER ::= { iso 3 } -- "iso" = 1
dod OBJECT IDENTIFIER ::= { org 6 }
internet OBJECT IDENTIFIER ::= { dod 1 }
directory OBJECT IDENTIFIER ::= { internet 1 }
mgmt OBJECT IDENTIFIER ::= { internet 2 }
mib-2 OBJECT IDENTIFIER ::= { mgmt 1 }
transmission OBJECT IDENTIFIER ::= { mib-2 10 }
experimental OBJECT IDENTIFIER ::= { internet 3 }
private OBJECT IDENTIFIER ::= { internet 4 }
enterprises OBJECT IDENTIFIER ::= { private 1 }
security OBJECT IDENTIFIER ::= { internet 5 }
snmpV2 OBJECT IDENTIFIER ::= { internet 6 }
-- transport domains
snmpDomains OBJECT IDENTIFIER ::= { snmpV2 1 }
-- transport proxies
snmpProxys OBJECT IDENTIFIER ::= { snmpV2 2 }
-- module identities
snmpModules OBJECT IDENTIFIER ::= { snmpV2 3 }
-- Extended UTCTime, to allow dates with four-digit years
-- (Note that this definition of ExtUTCTime is not to be IMPORTed
-- by MIB modules.)
ExtUTCTime ::= OCTET STRING(SIZE(11 | 13))
-- format is YYMMDDHHMMZ or YYYYMMDDHHMMZ
-- where: YY - last two digits of year (only years
-- between 1900-1999)
-- YYYY - last four digits of the year (any year)
-- MM - month (01 through 12)
-- DD - day of month (01 through 31)
-- HH - hours (00 through 23)
-- MM - minutes (00 through 59)
-- Z - denotes GMT (the ASCII character Z)
--
-- For example, "9502192015Z" and "199502192015Z" represent
-- 8:15pm GMT on 19 February 1995. Years after 1999 must use
-- the four digit year format. Years 1900-1999 may use the
-- two or four digit format.
-- definitions for information modules
MODULE-IDENTITY MACRO ::=
BEGIN
TYPE NOTATION ::=
"LAST-UPDATED" value(Update ExtUTCTime)
"ORGANIZATION" Text
"CONTACT-INFO" Text
"DESCRIPTION" Text
RevisionPart
VALUE NOTATION ::=
value(VALUE OBJECT IDENTIFIER)
RevisionPart ::=
Revisions
| empty
Revisions ::=
Revision
| Revisions Revision
Revision ::=
"REVISION" value(Update ExtUTCTime)
"DESCRIPTION" Text
-- a character string as defined in section 3.1.1
Text ::= value(IA5String)
END
OBJECT-IDENTITY MACRO ::=
BEGIN
TYPE NOTATION ::=
"STATUS" Status
"DESCRIPTION" Text
ReferPart
VALUE NOTATION ::=
value(VALUE OBJECT IDENTIFIER)
Status ::=
"current"
| "deprecated"
| "obsolete"
ReferPart ::=
"REFERENCE" Text
| empty
-- a character string as defined in section 3.1.1
Text ::= value(IA5String)
END
-- names of objects
-- (Note that these definitions of ObjectName and NotificationName
-- are not to be IMPORTed by MIB modules.)
ObjectName ::=
OBJECT IDENTIFIER
NotificationName ::=
OBJECT IDENTIFIER
-- syntax of objects
-- the "base types" defined here are:
-- 3 built-in ASN.1 types: INTEGER, OCTET STRING, OBJECT IDENTIFIER
-- 8 application-defined types: Integer32, IpAddress, Counter32,
-- Gauge32, Unsigned32, TimeTicks, Opaque, and Counter64
ObjectSyntax ::=
CHOICE {
simple
SimpleSyntax,
-- note that SEQUENCEs for conceptual tables and
-- rows are not mentioned here...
application-wide
ApplicationSyntax
}
-- built-in ASN.1 types
SimpleSyntax ::=
CHOICE {
-- INTEGERs with a more restrictive range
-- may also be used
integer-value -- includes Integer32
INTEGER (-2147483648..2147483647),
-- OCTET STRINGs with a more restrictive size
-- may also be used
string-value
OCTET STRING (SIZE (0..65535)),
objectID-value
OBJECT IDENTIFIER
}
-- indistinguishable from INTEGER, but never needs more than
-- 32-bits for a two's complement representation
Integer32 ::=
INTEGER (-2147483648..2147483647)
-- application-wide types
ApplicationSyntax ::=
CHOICE {
ipAddress-value
IpAddress,
counter-value
Counter32,
timeticks-value
TimeTicks,
arbitrary-value
Opaque,
big-counter-value
Counter64,
unsigned-integer-value -- includes Gauge32
Unsigned32
}
-- in network-byte order
-- (this is a tagged type for historical reasons)
IpAddress ::=
[APPLICATION 0]
IMPLICIT OCTET STRING (SIZE (4))
-- this wraps
Counter32 ::=
[APPLICATION 1]
IMPLICIT INTEGER (0..4294967295)
-- this doesn't wrap
Gauge32 ::=
[APPLICATION 2]
IMPLICIT INTEGER (0..4294967295)
-- an unsigned 32-bit quantity
-- indistinguishable from Gauge32
Unsigned32 ::=
[APPLICATION 2]
IMPLICIT INTEGER (0..4294967295)
-- hundredths of seconds since an epoch
TimeTicks ::=
[APPLICATION 3]
IMPLICIT INTEGER (0..4294967295)
-- for backward-compatibility only
Opaque ::=
[APPLICATION 4]
IMPLICIT OCTET STRING
-- for counters that wrap in less than one hour with only 32 bits
Counter64 ::=
[APPLICATION 6]
IMPLICIT INTEGER (0..18446744073709551615)
-- definition for objects
OBJECT-TYPE MACRO ::=
BEGIN
TYPE NOTATION ::=
"SYNTAX" Syntax
UnitsPart
"MAX-ACCESS" Access
"STATUS" Status
"DESCRIPTION" Text
ReferPart
IndexPart
DefValPart
VALUE NOTATION ::=
value(VALUE ObjectName)
Syntax ::= -- Must be one of the following:
-- a base type (or its refinement),
-- a textual convention (or its refinement), or
-- a BITS pseudo-type
type
| "BITS" "{" NamedBits "}"
NamedBits ::= NamedBit
| NamedBits "," NamedBit
NamedBit ::= identifier "(" number ")" -- number is nonnegative
UnitsPart ::=
"UNITS" Text
| empty
Access ::=
"not-accessible"
| "accessible-for-notify"
| "read-only"
| "read-write"
| "read-create"
Status ::=
"current"
| "deprecated"
| "obsolete"
ReferPart ::=
"REFERENCE" Text
| empty
IndexPart ::=
"INDEX" "{" IndexTypes "}"
| "AUGMENTS" "{" Entry "}"
| empty
IndexTypes ::=
IndexType
| IndexTypes "," IndexType
IndexType ::=
"IMPLIED" Index
| Index
Index ::=
-- use the SYNTAX value of the
-- correspondent OBJECT-TYPE invocation
value(ObjectName)
Entry ::=
-- use the INDEX value of the
-- correspondent OBJECT-TYPE invocation
value(ObjectName)
DefValPart ::= "DEFVAL" "{" Defvalue "}"
| empty
Defvalue ::= -- must be valid for the type specified in
-- SYNTAX clause of same OBJECT-TYPE macro
value(ObjectSyntax)
| "{" BitsValue "}"
BitsValue ::= BitNames
| empty
BitNames ::= BitName
| BitNames "," BitName
BitName ::= identifier
-- a character string as defined in section 3.1.1
Text ::= value(IA5String)
END
-- definitions for notifications
NOTIFICATION-TYPE MACRO ::=
BEGIN
TYPE NOTATION ::=
ObjectsPart
"STATUS" Status
"DESCRIPTION" Text
ReferPart
VALUE NOTATION ::=
value(VALUE NotificationName)
ObjectsPart ::=
"OBJECTS" "{" Objects "}"
| empty
Objects ::=
Object
| Objects "," Object
Object ::=
value(ObjectName)
Status ::=
"current"
| "deprecated"
| "obsolete"
ReferPart ::=
"REFERENCE" Text
| empty
-- a character string as defined in section 3.1.1
Text ::= value(IA5String)
END
-- definitions of administrative identifiers
zeroDotZero OBJECT-IDENTITY
STATUS current
DESCRIPTION
"A value used for null identifiers."
::= { 0 0 }
TEXTUAL-CONVENTION MACRO ::=
BEGIN
TYPE NOTATION ::=
DisplayPart
"STATUS" Status
"DESCRIPTION" Text
ReferPart
"SYNTAX" Syntax
VALUE NOTATION ::=
value(VALUE Syntax) -- adapted ASN.1
DisplayPart ::=
"DISPLAY-HINT" Text
| empty
Status ::=
"current"
| "deprecated"
| "obsolete"
ReferPart ::=
"REFERENCE" Text
| empty
-- a character string as defined in [2]
Text ::= value(IA5String)
Syntax ::= -- Must be one of the following:
-- a base type (or its refinement), or
-- a BITS pseudo-type
type
| "BITS" "{" NamedBits "}"
NamedBits ::= NamedBit
| NamedBits "," NamedBit
NamedBit ::= identifier "(" number ")" -- number is nonnegative
END
MODULE-COMPLIANCE MACRO ::=
BEGIN
TYPE NOTATION ::=
"STATUS" Status
"DESCRIPTION" Text
ReferPart
ModulePart
VALUE NOTATION ::=
value(VALUE OBJECT IDENTIFIER)
Status ::=
"current"
| "deprecated"
| "obsolete"
ReferPart ::=
"REFERENCE" Text
| empty
ModulePart ::=
Modules
Modules ::=
Module
| Modules Module
Module ::=
-- name of module --
"MODULE" ModuleName
MandatoryPart
CompliancePart
ModuleName ::=
-- identifier must start with uppercase letter
identifier ModuleIdentifier
-- must not be empty unless contained
-- in MIB Module
| empty
ModuleIdentifier ::=
value(OBJECT IDENTIFIER)
| empty
MandatoryPart ::=
"MANDATORY-GROUPS" "{" Groups "}"
| empty
Groups ::=
Group
| Groups "," Group
Group ::=
value(OBJECT IDENTIFIER)
CompliancePart ::=
Compliances
| empty
Compliances ::=
Compliance
| Compliances Compliance
Compliance ::=
ComplianceGroup
| Object
ComplianceGroup ::=
"GROUP" value(OBJECT IDENTIFIER)
"DESCRIPTION" Text
Object ::=
"OBJECT" value(ObjectName)
SyntaxPart
WriteSyntaxPart
AccessPart
"DESCRIPTION" Text
-- must be a refinement for object's SYNTAX clause
SyntaxPart ::= "SYNTAX" Syntax
| empty
-- must be a refinement for object's SYNTAX clause
WriteSyntaxPart ::= "WRITE-SYNTAX" Syntax
| empty
Syntax ::= -- Must be one of the following:
-- a base type (or its refinement),
-- a textual convention (or its refinement), or
-- a BITS pseudo-type
type
| "BITS" "{" NamedBits "}"
NamedBits ::= NamedBit
| NamedBits "," NamedBit
NamedBit ::= identifier "(" number ")" -- number is nonnegative
AccessPart ::=
"MIN-ACCESS" Access
| empty
Access ::=
"not-accessible"
| "accessible-for-notify"
| "read-only"
| "read-write"
| "read-create"
-- a character string as defined in [2]
Text ::= value(IA5String)
END
OBJECT-GROUP MACRO ::=
BEGIN
TYPE NOTATION ::=
ObjectsPart
"STATUS" Status
"DESCRIPTION" Text
ReferPart
VALUE NOTATION ::=
value(VALUE OBJECT IDENTIFIER)
ObjectsPart ::=
"OBJECTS" "{" Objects "}"
Objects ::=
Object
| Objects "," Object
Object ::=
value(ObjectName)
Status ::=
"current"
| "deprecated"
| "obsolete"
ReferPart ::=
"REFERENCE" Text
| empty
-- a character string as defined in [2]
Text ::= value(IA5String)
END
InetPortNumber ::= TEXTUAL-CONVENTION
DISPLAY-HINT "d"
STATUS current
DESCRIPTION
"Represents a 16 bit port number of an Internet transport
layer protocol. Port numbers are assigned by IANA. A
current list of all assignments is available from
<http://www.iana.org/>.
The value zero is object-specific and must be defined as
part of the description of any object that uses this
syntax. Examples of the usage of zero might include
situations where a port number is unknown, or when the
value zero is used as a wildcard in a filter."
REFERENCE "STD 6 (RFC 768), STD 7 (RFC 793) and RFC 2960"
SYNTAX Unsigned32 (0..65535)
InetAddress ::= TEXTUAL-CONVENTION
STATUS current
DESCRIPTION
"Denotes a generic Internet address.
An InetAddress value is always interpreted within the context
of an InetAddressType value. Every usage of the InetAddress
textual convention is required to specify the InetAddressType
object that provides the context. It is suggested that the
InetAddressType object be logically registered before the
object(s) that use the InetAddress textual convention, if
they appear in the same logical row.
The value of an InetAddress object must always be
consistent with the value of the associated InetAddressType
object. Attempts to set an InetAddress object to a value
inconsistent with the associated InetAddressType
must fail with an inconsistentValue error.
When this textual convention is used as the syntax of an
index object, there may be issues with the limit of 128
sub-identifiers specified in SMIv2, STD 58. In this case,
the object definition MUST include a 'SIZE' clause to
limit the number of potential instance sub-identifiers;
otherwise the applicable constraints MUST be stated in
the appropriate conceptual row DESCRIPTION clauses, or
in the surrounding documentation if there is no single
DESCRIPTION clause that is appropriate."
SYNTAX OCTET STRING (SIZE (0..255))
InetAddressType ::= TEXTUAL-CONVENTION
STATUS current
DESCRIPTION
"A value that represents a type of Internet address.
unknown(0) An unknown address type. This value MUST
be used if the value of the corresponding
InetAddress object is a zero-length string.
It may also be used to indicate an IP address
that is not in one of the formats defined
below.
ipv4(1) An IPv4 address as defined by the
InetAddressIPv4 textual convention.
ipv6(2) An IPv6 address as defined by the
InetAddressIPv6 textual convention.
ipv4z(3) A non-global IPv4 address including a zone
index as defined by the InetAddressIPv4z
textual convention.
ipv6z(4) A non-global IPv6 address including a zone
index as defined by the InetAddressIPv6z
textual convention.
dns(16) A DNS domain name as defined by the
InetAddressDNS textual convention.
Each definition of a concrete InetAddressType value must be
accompanied by a definition of a textual convention for use
with that InetAddressType.
To support future extensions, the InetAddressType textual
convention SHOULD NOT be sub-typed in object type definitions.
It MAY be sub-typed in compliance statements in order to
require only a subset of these address types for a compliant
implementation.
Implementations must ensure that InetAddressType objects
and any dependent objects (e.g., InetAddress objects) are
consistent. An inconsistentValue error must be generated
if an attempt to change an InetAddressType object would,
for example, lead to an undefined InetAddress value. In
particular, InetAddressType/InetAddress pairs must be
changed together if the address type changes (e.g., from
ipv6(2) to ipv4(1))."
SYNTAX INTEGER {
unknown(0),
ipv4(1),
ipv6(2),
ipv4z(3),
ipv6z(4),
dns(16)
}
END

View file

@ -0,0 +1 @@
../linkTarget/

22
internal/snmp/testdata/mibs/testmib vendored Normal file
View file

@ -0,0 +1,22 @@
TGTEST-MIB DEFINITIONS ::= BEGIN
org OBJECT IDENTIFIER ::= { iso 3 } -- "iso" = 1
dod OBJECT IDENTIFIER ::= { org 6 }
internet OBJECT IDENTIFIER ::= { dod 1 }
mgmt OBJECT IDENTIFIER ::= { internet 2 }
mibs OBJECT IDENTIFIER ::= { mgmt 1 }
system OBJECT IDENTIFIER ::= { mibs 1 }
systemUpTime OBJECT IDENTIFIER ::= { system 3 }
sysUpTimeInstance OBJECT IDENTIFIER ::= { systemUpTime 0 }
private OBJECT IDENTIFIER ::= { internet 4 }
enterprises OBJECT IDENTIFIER ::= { private 1 }
snmpV2 OBJECT IDENTIFIER ::= { internet 6 }
snmpModules OBJECT IDENTIFIER ::= { snmpV2 3 }
snmpMIB OBJECT IDENTIFIER ::= { snmpModules 1 }
snmpMIBObjects OBJECT IDENTIFIER ::= { snmpMIB 1 }
snmpTraps OBJECT IDENTIFIER ::= { snmpMIBObjects 5 }
coldStart OBJECT IDENTIFIER ::= { snmpTraps 1 }
END

View file

@ -0,0 +1,29 @@
package snmp
type TranslatorPlugin interface {
SetTranslator(name string) // Agent calls this on inputs before Init
}
type Translator interface {
SnmpTranslate(oid string) (
mibName string, oidNum string, oidText string,
conversion string,
err error,
)
SnmpTable(oid string) (
mibName string, oidNum string, oidText string,
fields []Field,
err error,
)
SnmpFormatEnum(oid string, value interface{}, full bool) (
formatted string,
err error,
)
SnmpFormatDisplayHint(oid string, value interface{}) (
formatted string,
err error,
)
}

View file

@ -0,0 +1,240 @@
package snmp
import (
"errors"
"fmt"
"strings"
"github.com/sleepinggenius2/gosmi"
"github.com/sleepinggenius2/gosmi/models"
"github.com/sleepinggenius2/gosmi/types"
"github.com/influxdata/telegraf"
)
var errCannotFormatUnkownType = errors.New("cannot format value, unknown type")
type gosmiTranslator struct {
}
func NewGosmiTranslator(paths []string, log telegraf.Logger) (*gosmiTranslator, error) {
err := LoadMibsFromPath(paths, log, &GosmiMibLoader{})
if err == nil {
return &gosmiTranslator{}, nil
}
return nil, err
}
//nolint:revive //function-result-limit conditionally 5 return results allowed
func (g *gosmiTranslator) SnmpTranslate(oid string) (mibName string, oidNum string, oidText string, conversion string, err error) {
mibName, oidNum, oidText, conversion, _, err = snmpTranslateCall(oid)
return mibName, oidNum, oidText, conversion, err
}
// snmpTable resolves the given OID as a table, providing information about the
// table and fields within.
//
//nolint:revive //Too many return variable but necessary
func (g *gosmiTranslator) SnmpTable(oid string) (
mibName string, oidNum string, oidText string,
fields []Field,
err error) {
mibName, oidNum, oidText, _, node, err := snmpTranslateCall(oid)
if err != nil {
return "", "", "", nil, fmt.Errorf("translating: %w", err)
}
mibPrefix := mibName + "::"
col, tagOids := getIndex(mibPrefix, node)
for _, c := range col {
_, isTag := tagOids[mibPrefix+c]
fields = append(fields, Field{Name: c, Oid: mibPrefix + c, IsTag: isTag})
}
return mibName, oidNum, oidText, fields, nil
}
func (*gosmiTranslator) SnmpFormatEnum(oid string, value interface{}, full bool) (string, error) {
if value == nil {
return "", nil
}
//nolint:dogsled // only need to get the node
_, _, _, _, node, err := snmpTranslateCall(oid)
if err != nil {
return "", err
}
if node.Type == nil {
return "", errCannotFormatUnkownType
}
var v models.Value
if full {
v = node.FormatValue(value, models.FormatEnumName, models.FormatEnumValue)
} else {
v = node.FormatValue(value, models.FormatEnumName)
}
return v.String(), nil
}
func (*gosmiTranslator) SnmpFormatDisplayHint(oid string, value interface{}) (string, error) {
if value == nil {
return "", nil
}
//nolint:dogsled // only need to get the node
_, _, _, _, node, err := snmpTranslateCall(oid)
if err != nil {
return "", err
}
if node.Type == nil {
return "", errCannotFormatUnkownType
}
return node.FormatValue(value).String(), nil
}
func getIndex(mibPrefix string, node gosmi.SmiNode) (col []string, tagOids map[string]struct{}) {
// first attempt to get the table's tags
// mimcks grabbing INDEX {} that is returned from snmptranslate -Td MibName
indices := node.GetIndex()
tagOids = make(map[string]struct{}, len(indices))
for _, index := range indices {
tagOids[mibPrefix+index.Name] = struct{}{}
}
// grabs all columns from the table
// mimmicks grabbing everything returned from snmptable -Ch -Cl -c public 127.0.0.1 oidFullName
_, col = node.GetColumns()
return col, tagOids
}
//nolint:revive //Too many return variable but necessary
func snmpTranslateCall(oid string) (mibName string, oidNum string, oidText string, conversion string, node gosmi.SmiNode, err error) {
var out gosmi.SmiNode
var end string
if strings.ContainsAny(oid, "::") {
// split given oid
// for example RFC1213-MIB::sysUpTime.0
s := strings.SplitN(oid, "::", 2)
// moduleName becomes RFC1213
moduleName := s[0]
module, err := gosmi.GetModule(moduleName)
if err != nil {
return oid, oid, oid, "", gosmi.SmiNode{}, err
}
if s[1] == "" {
return "", oid, oid, "", gosmi.SmiNode{}, fmt.Errorf("cannot parse %v", oid)
}
// node becomes sysUpTime.0
node := s[1]
if strings.ContainsAny(node, ".") {
s = strings.SplitN(node, ".", 2)
// node becomes sysUpTime
node = s[0]
end = "." + s[1]
}
out, err = module.GetNode(node)
if err != nil {
return oid, oid, oid, "", out, err
}
if oidNum = out.RenderNumeric(); oidNum == "" {
return oid, oid, oid, "", out, fmt.Errorf("cannot translate %v into a numeric OID, please ensure all imported MIBs are in the path", oid)
}
oidNum = "." + oidNum + end
} else if strings.ContainsAny(oid, "abcdefghijklnmopqrstuvwxyz") {
//handle mixed oid ex. .iso.2.3
s := strings.Split(oid, ".")
for i := range s {
if strings.ContainsAny(s[i], "abcdefghijklmnopqrstuvwxyz") {
out, err = gosmi.GetNode(s[i])
if err != nil {
return oid, oid, oid, "", out, err
}
s[i] = out.RenderNumeric()
}
}
oidNum = strings.Join(s, ".")
out, err = gosmi.GetNodeByOID(types.OidMustFromString(oidNum))
if err != nil {
return oid, oid, oid, "", out, err
}
} else {
out, err = gosmi.GetNodeByOID(types.OidMustFromString(oid))
oidNum = oid
// ensure modules are loaded or node will be empty (might not error)
//nolint:nilerr // do not return the err as the oid is numeric and telegraf can continue
if err != nil || out.Name == "iso" {
return oid, oid, oid, "", out, nil
}
}
tc := out.GetSubtree()
for i := range tc {
// case where the mib doesn't have a conversion so Type struct will be nil
// prevents seg fault
if tc[i].Type == nil {
break
}
if tc[i].Type.Format != "" {
conversion = "displayhint"
} else {
switch tc[i].Type.Name {
case "InetAddress", "IPSIpAddress":
conversion = "ipaddr"
}
}
}
oidText = out.RenderQualified()
i := strings.Index(oidText, "::")
if i == -1 {
return "", oid, oid, "", out, errors.New("not found")
}
mibName = oidText[:i]
oidText = oidText[i+2:] + end
return mibName, oidNum, oidText, conversion, out, nil
}
// The following is for snmp_trap
type MibEntry struct {
MibName string
OidText string
}
func TrapLookup(oid string) (e MibEntry, err error) {
var givenOid types.Oid
if givenOid, err = types.OidFromString(oid); err != nil {
return e, fmt.Errorf("could not convert OID %s: %w", oid, err)
}
// Get node name
var node gosmi.SmiNode
if node, err = gosmi.GetNodeByOID(givenOid); err != nil {
return e, err
}
e.OidText = node.Name
// Add not found OID part
if !givenOid.Equals(node.Oid) {
e.OidText += "." + givenOid[len(node.Oid):].String()
}
// Get module name
module := node.GetModule()
if module.Name != "<well-known>" {
e.MibName = module.Name
}
return e, nil
}

View file

@ -0,0 +1,728 @@
package snmp
import (
"path/filepath"
"testing"
"github.com/gosnmp/gosnmp"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf/testutil"
)
func getGosmiTr(t *testing.T) Translator {
testDataPath, err := filepath.Abs("./testdata/gosmi")
require.NoError(t, err)
tr, err := NewGosmiTranslator([]string{testDataPath}, testutil.Logger{})
require.NoError(t, err)
return tr
}
func TestGosmiTranslator(t *testing.T) {
var tr Translator
var err error
tr, err = NewGosmiTranslator([]string{"testdata"}, testutil.Logger{})
require.NoError(t, err)
require.NotNil(t, tr)
}
func TestFieldInitGosmi(t *testing.T) {
tr := getGosmiTr(t)
translations := []struct {
inputOid string
inputName string
inputConversion string
expectedOid string
expectedName string
expectedConversion string
}{
{".1.2.3", "foo", "", ".1.2.3", "foo", ""},
{".iso.2.3", "foo", "", ".1.2.3", "foo", ""},
{".1.0.0.0.1.1", "", "", ".1.0.0.0.1.1", "server", ""},
{".1.0.0.0.1.5", "", "", ".1.0.0.0.1.5", "dateAndTime", "displayhint"},
{"IF-MIB::ifPhysAddress.1", "", "", ".1.3.6.1.2.1.2.2.1.6.1", "ifPhysAddress.1", "displayhint"},
{"IF-MIB::ifPhysAddress.1", "", "none", ".1.3.6.1.2.1.2.2.1.6.1", "ifPhysAddress.1", "none"},
{"BRIDGE-MIB::dot1dTpFdbAddress.1", "", "", ".1.3.6.1.2.1.17.4.3.1.1.1", "dot1dTpFdbAddress.1", "displayhint"},
{"TCP-MIB::tcpConnectionLocalAddress.1", "", "", ".1.3.6.1.2.1.6.19.1.2.1", "tcpConnectionLocalAddress.1", "ipaddr"},
{".999", "", "", ".999", ".999", ""},
}
for _, txl := range translations {
f := Field{Oid: txl.inputOid, Name: txl.inputName, Conversion: txl.inputConversion}
require.NoError(t, f.Init(tr), "inputOid=%q inputName=%q", txl.inputOid, txl.inputName)
require.Equal(t, txl.expectedOid, f.Oid, "inputOid=%q inputName=%q inputConversion=%q", txl.inputOid, txl.inputName, txl.inputConversion)
require.Equal(t, txl.expectedName, f.Name, "inputOid=%q inputName=%q inputConversion=%q", txl.inputOid, txl.inputName, txl.inputConversion)
require.Equal(t, txl.expectedConversion, f.Conversion, "inputOid=%q inputName=%q inputConversion=%q", txl.inputOid, txl.inputName, txl.inputConversion)
}
}
func TestTableInitGosmi(t *testing.T) {
tbl := Table{
Oid: ".1.3.6.1.2.1.3.1",
Fields: []Field{
{Oid: ".999", Name: "foo"},
{Oid: ".1.3.6.1.2.1.3.1.1.1", Name: "atIfIndex", IsTag: true},
{Oid: "RFC1213-MIB::atPhysAddress", Name: "atPhysAddress"},
},
}
tr := getGosmiTr(t)
require.NoError(t, tbl.Init(tr))
require.Equal(t, "atTable", tbl.Name)
require.Len(t, tbl.Fields, 5)
require.Equal(t, ".999", tbl.Fields[0].Oid)
require.Equal(t, "foo", tbl.Fields[0].Name)
require.False(t, tbl.Fields[0].IsTag)
require.Empty(t, tbl.Fields[0].Conversion)
require.Equal(t, ".1.3.6.1.2.1.3.1.1.1", tbl.Fields[1].Oid)
require.Equal(t, "atIfIndex", tbl.Fields[1].Name)
require.True(t, tbl.Fields[1].IsTag)
require.Empty(t, tbl.Fields[1].Conversion)
require.Equal(t, ".1.3.6.1.2.1.3.1.1.2", tbl.Fields[2].Oid)
require.Equal(t, "atPhysAddress", tbl.Fields[2].Name)
require.False(t, tbl.Fields[2].IsTag)
require.Equal(t, "displayhint", tbl.Fields[2].Conversion)
require.Equal(t, ".1.3.6.1.2.1.3.1.1.3", tbl.Fields[4].Oid)
require.Equal(t, "atNetAddress", tbl.Fields[4].Name)
require.True(t, tbl.Fields[4].IsTag)
require.Empty(t, tbl.Fields[4].Conversion)
}
// TestTableBuild_walk in snmp_test.go is split into two tests here,
// noTranslate and Translate.
//
// This is only running with gosmi translator but should be valid with
// netsnmp too.
func TestTableBuild_walk_noTranslate(t *testing.T) {
tbl := Table{
Name: "mytable",
IndexAsTag: true,
Fields: []Field{
{
Name: "myfield1",
Oid: ".1.0.0.0.1.1",
IsTag: true,
},
{
Name: "myfield2",
Oid: ".1.0.0.0.1.2",
},
{
Name: "myfield3",
Oid: ".1.0.0.0.1.3",
Conversion: "float",
},
{
Name: "myfield4",
Oid: ".1.0.0.2.1.5",
OidIndexSuffix: ".9.9",
},
{
Name: "myfield5",
Oid: ".1.0.0.2.1.5",
OidIndexLength: 1,
},
},
}
tb, err := tbl.Build(tsc, true)
require.NoError(t, err)
require.Equal(t, "mytable", tb.Name)
rtr1 := RTableRow{
Tags: map[string]string{
"myfield1": "foo",
"index": "0",
},
Fields: map[string]interface{}{
"myfield2": 1,
"myfield3": float64(0.123),
"myfield4": 11,
"myfield5": 11,
},
}
rtr2 := RTableRow{
Tags: map[string]string{
"myfield1": "bar",
"index": "1",
},
Fields: map[string]interface{}{
"myfield2": 2,
"myfield3": float64(0.456),
"myfield4": 22,
"myfield5": 22,
},
}
rtr3 := RTableRow{
Tags: map[string]string{
"index": "2",
},
Fields: map[string]interface{}{
"myfield2": 0,
"myfield3": float64(0.0),
},
}
rtr4 := RTableRow{
Tags: map[string]string{
"index": "3",
},
Fields: map[string]interface{}{
"myfield3": float64(9.999),
},
}
require.Len(t, tb.Rows, 4)
require.Contains(t, tb.Rows, rtr1)
require.Contains(t, tb.Rows, rtr2)
require.Contains(t, tb.Rows, rtr3)
require.Contains(t, tb.Rows, rtr4)
}
func TestTableBuild_walk_Translate(t *testing.T) {
tbl := Table{
Name: "atTable",
IndexAsTag: true,
Fields: []Field{
{
Name: "ifIndex",
Oid: "1.3.6.1.2.1.3.1.1.1",
IsTag: true,
},
{
Name: "atPhysAddress",
Oid: "1.3.6.1.2.1.3.1.1.2",
Translate: false,
},
{
Name: "atNetAddress",
Oid: "1.3.6.1.2.1.3.1.1.3",
Translate: true,
},
},
}
require.NoError(t, tbl.Init(getGosmiTr(t)))
tb, err := tbl.Build(tsc, true)
require.NoError(t, err)
require.Equal(t, "atTable", tb.Name)
rtr1 := RTableRow{
Tags: map[string]string{
"ifIndex": "foo",
"index": "0",
},
Fields: map[string]interface{}{
"atPhysAddress": 1,
"atNetAddress": "atNetAddress",
},
}
rtr2 := RTableRow{
Tags: map[string]string{
"ifIndex": "bar",
"index": "1",
},
Fields: map[string]interface{}{
"atPhysAddress": 2,
},
}
rtr3 := RTableRow{
Tags: map[string]string{
"index": "2",
},
Fields: map[string]interface{}{
"atPhysAddress": 0,
},
}
require.Len(t, tb.Rows, 3)
require.Contains(t, tb.Rows, rtr1)
require.Contains(t, tb.Rows, rtr2)
require.Contains(t, tb.Rows, rtr3)
}
func TestTableBuild_noWalkGosmi(t *testing.T) {
tbl := Table{
Name: "mytable",
Fields: []Field{
{
Name: "myfield1",
Oid: ".1.0.0.1.1",
IsTag: true,
},
{
Name: "myfield2",
Oid: ".1.0.0.1.2",
},
{
Name: "myfield3",
Oid: ".1.0.0.1.2",
IsTag: true,
},
{
Name: "empty",
Oid: ".1.0.0.0.1.1.2",
},
{
Name: "noexist",
Oid: ".1.2.3.4.5",
},
{
Name: "myfield4",
Oid: ".1.3.6.1.2.1.3.1.1.3.0",
Translate: true,
},
},
}
require.NoError(t, tbl.Init(getGosmiTr(t)))
tb, err := tbl.Build(tsc, false)
require.NoError(t, err)
rtr := RTableRow{
Tags: map[string]string{"myfield1": "baz", "myfield3": "234"},
Fields: map[string]interface{}{"myfield2": 234, "myfield4": "atNetAddress"},
}
require.Len(t, tb.Rows, 1)
require.Contains(t, tb.Rows, rtr)
}
func TestFieldConvertGosmi(t *testing.T) {
testTable := []struct {
input interface{}
conv string
expected interface{}
}{
{"0.123", "float", float64(0.123)},
{[]byte("0.123"), "float", float64(0.123)},
{float32(0.123), "float", float64(float32(0.123))},
{float64(0.123), "float", float64(0.123)},
{float64(0.123123123123), "float", float64(0.123123123123)},
{123, "float", float64(123)},
{123, "float(0)", float64(123)},
{123, "float(4)", float64(0.0123)},
{int8(123), "float(3)", float64(0.123)},
{int16(123), "float(3)", float64(0.123)},
{int32(123), "float(3)", float64(0.123)},
{int64(123), "float(3)", float64(0.123)},
{uint(123), "float(3)", float64(0.123)},
{uint8(123), "float(3)", float64(0.123)},
{uint16(123), "float(3)", float64(0.123)},
{uint32(123), "float(3)", float64(0.123)},
{uint64(123), "float(3)", float64(0.123)},
{"123", "int", int64(123)},
{[]byte("123"), "int", int64(123)},
{"123123123123", "int", int64(123123123123)},
{[]byte("123123123123"), "int", int64(123123123123)},
{float32(12.3), "int", int64(12)},
{float64(12.3), "int", int64(12)},
{123, "int", int64(123)},
{int8(123), "int", int64(123)},
{int16(123), "int", int64(123)},
{int32(123), "int", int64(123)},
{int64(123), "int", int64(123)},
{uint(123), "int", int64(123)},
{uint8(123), "int", int64(123)},
{uint16(123), "int", int64(123)},
{uint32(123), "int", int64(123)},
{uint64(123), "int", int64(123)},
{[]byte("abcdef"), "hwaddr", "61:62:63:64:65:66"},
{"abcdef", "hwaddr", "61:62:63:64:65:66"},
{[]byte("abcd"), "ipaddr", "97.98.99.100"},
{"abcd", "ipaddr", "97.98.99.100"},
{[]byte("abcdefghijklmnop"), "ipaddr", "6162:6364:6566:6768:696a:6b6c:6d6e:6f70"},
{3, "enum", "testing"},
{3, "enum(1)", "testing(3)"},
}
for _, tc := range testTable {
f := Field{
Name: "test",
Conversion: tc.conv,
}
require.NoError(t, f.Init(getGosmiTr(t)))
act, err := f.Convert(gosnmp.SnmpPDU{Name: ".1.3.6.1.2.1.2.2.1.8", Value: tc.input})
require.NoError(t, err, "input=%T(%v) conv=%s expected=%T(%v)", tc.input, tc.input, tc.conv, tc.expected, tc.expected)
require.EqualValues(t, tc.expected, act, "input=%T(%v) conv=%s expected=%T(%v)", tc.input, tc.input, tc.conv, tc.expected, tc.expected)
}
}
func TestSnmpFormatDisplayHint(t *testing.T) {
tests := []struct {
name string
oid string
input interface{}
expected string
}{
{
name: "ifOperStatus",
oid: ".1.3.6.1.2.1.2.2.1.8",
input: 3,
expected: "testing(3)",
}, {
name: "ifPhysAddress",
oid: ".1.3.6.1.2.1.2.2.1.6",
input: []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef},
expected: "01:23:45:67:89:ab:cd:ef",
}, {
name: "DateAndTime short",
oid: ".1.0.0.0.1.5",
input: []byte{0x07, 0xe8, 0x09, 0x18, 0x10, 0x24, 0x27, 0x05},
expected: "2024-9-24,16:36:39.5",
}, {
name: "DateAndTime long",
oid: ".1.0.0.0.1.5",
input: []byte{0x07, 0xe8, 0x09, 0x18, 0x10, 0x24, 0x27, 0x05, 0x2b, 0x02, 0x00},
expected: "2024-9-24,16:36:39.5,+2:0",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tr := getGosmiTr(t)
actual, err := tr.SnmpFormatDisplayHint(tt.oid, tt.input)
require.NoError(t, err)
require.Equal(t, tt.expected, actual)
})
}
}
func TestTableJoin_walkGosmi(t *testing.T) {
tbl := Table{
Name: "mytable",
IndexAsTag: true,
Fields: []Field{
{
Name: "myfield1",
Oid: ".1.0.0.3.1.1",
IsTag: true,
},
{
Name: "myfield2",
Oid: ".1.0.0.3.1.2",
},
{
Name: "myfield3",
Oid: ".1.0.0.3.1.3",
SecondaryIndexTable: true,
},
{
Name: "myfield4",
Oid: ".1.0.0.0.1.1",
SecondaryIndexUse: true,
IsTag: true,
},
{
Name: "myfield5",
Oid: ".1.0.0.0.1.2",
SecondaryIndexUse: true,
},
},
}
require.NoError(t, tbl.Init(getGosmiTr(t)))
tb, err := tbl.Build(tsc, true)
require.NoError(t, err)
require.Equal(t, "mytable", tb.Name)
rtr1 := RTableRow{
Tags: map[string]string{
"myfield1": "instance",
"myfield4": "bar",
"index": "10",
},
Fields: map[string]interface{}{
"myfield2": 10,
"myfield3": 1,
"myfield5": 2,
},
}
rtr2 := RTableRow{
Tags: map[string]string{
"myfield1": "instance2",
"index": "11",
},
Fields: map[string]interface{}{
"myfield2": 20,
"myfield3": 2,
"myfield5": 0,
},
}
rtr3 := RTableRow{
Tags: map[string]string{
"myfield1": "instance3",
"index": "12",
},
Fields: map[string]interface{}{
"myfield2": 20,
"myfield3": 3,
},
}
require.Len(t, tb.Rows, 3)
require.Contains(t, tb.Rows, rtr1)
require.Contains(t, tb.Rows, rtr2)
require.Contains(t, tb.Rows, rtr3)
}
func TestTableOuterJoin_walkGosmi(t *testing.T) {
tbl := Table{
Name: "mytable",
IndexAsTag: true,
Fields: []Field{
{
Name: "myfield1",
Oid: ".1.0.0.3.1.1",
IsTag: true,
},
{
Name: "myfield2",
Oid: ".1.0.0.3.1.2",
},
{
Name: "myfield3",
Oid: ".1.0.0.3.1.3",
SecondaryIndexTable: true,
SecondaryOuterJoin: true,
},
{
Name: "myfield4",
Oid: ".1.0.0.0.1.1",
SecondaryIndexUse: true,
IsTag: true,
},
{
Name: "myfield5",
Oid: ".1.0.0.0.1.2",
SecondaryIndexUse: true,
},
},
}
tb, err := tbl.Build(tsc, true)
require.NoError(t, err)
require.Equal(t, "mytable", tb.Name)
rtr1 := RTableRow{
Tags: map[string]string{
"myfield1": "instance",
"myfield4": "bar",
"index": "10",
},
Fields: map[string]interface{}{
"myfield2": 10,
"myfield3": 1,
"myfield5": 2,
},
}
rtr2 := RTableRow{
Tags: map[string]string{
"myfield1": "instance2",
"index": "11",
},
Fields: map[string]interface{}{
"myfield2": 20,
"myfield3": 2,
"myfield5": 0,
},
}
rtr3 := RTableRow{
Tags: map[string]string{
"myfield1": "instance3",
"index": "12",
},
Fields: map[string]interface{}{
"myfield2": 20,
"myfield3": 3,
},
}
rtr4 := RTableRow{
Tags: map[string]string{
"index": "Secondary.0",
"myfield4": "foo",
},
Fields: map[string]interface{}{
"myfield5": 1,
},
}
require.Len(t, tb.Rows, 4)
require.Contains(t, tb.Rows, rtr1)
require.Contains(t, tb.Rows, rtr2)
require.Contains(t, tb.Rows, rtr3)
require.Contains(t, tb.Rows, rtr4)
}
func TestTableJoinNoIndexAsTag_walkGosmi(t *testing.T) {
tbl := Table{
Name: "mytable",
IndexAsTag: false,
Fields: []Field{
{
Name: "myfield1",
Oid: ".1.0.0.3.1.1",
IsTag: true,
},
{
Name: "myfield2",
Oid: ".1.0.0.3.1.2",
},
{
Name: "myfield3",
Oid: ".1.0.0.3.1.3",
SecondaryIndexTable: true,
},
{
Name: "myfield4",
Oid: ".1.0.0.0.1.1",
SecondaryIndexUse: true,
IsTag: true,
},
{
Name: "myfield5",
Oid: ".1.0.0.0.1.2",
SecondaryIndexUse: true,
},
},
}
tb, err := tbl.Build(tsc, true)
require.NoError(t, err)
require.Equal(t, "mytable", tb.Name)
rtr1 := RTableRow{
Tags: map[string]string{
"myfield1": "instance",
"myfield4": "bar",
// "index": "10",
},
Fields: map[string]interface{}{
"myfield2": 10,
"myfield3": 1,
"myfield5": 2,
},
}
rtr2 := RTableRow{
Tags: map[string]string{
"myfield1": "instance2",
// "index": "11",
},
Fields: map[string]interface{}{
"myfield2": 20,
"myfield3": 2,
"myfield5": 0,
},
}
rtr3 := RTableRow{
Tags: map[string]string{
"myfield1": "instance3",
// "index": "12",
},
Fields: map[string]interface{}{
"myfield2": 20,
"myfield3": 3,
},
}
require.Len(t, tb.Rows, 3)
require.Contains(t, tb.Rows, rtr1)
require.Contains(t, tb.Rows, rtr2)
require.Contains(t, tb.Rows, rtr3)
}
func TestCanNotParse(t *testing.T) {
tr := getGosmiTr(t)
f := Field{
Oid: "RFC1213-MIB::",
}
require.Error(t, f.Init(tr))
}
func TestTrapLookup(t *testing.T) {
tests := []struct {
name string
oid string
expected MibEntry
}{
{
name: "Known trap OID",
oid: ".1.3.6.1.6.3.1.1.5.1",
expected: MibEntry{
MibName: "TGTEST-MIB",
OidText: "coldStart",
},
},
{
name: "Known trap value OID",
oid: ".1.3.6.1.2.1.1.3.0",
expected: MibEntry{
MibName: "TGTEST-MIB",
OidText: "sysUpTimeInstance",
},
},
{
name: "Unknown enterprise sub-OID",
oid: ".1.3.6.1.4.1.0.1.2.3",
expected: MibEntry{
MibName: "SNMPv2-SMI",
OidText: "enterprises.0.1.2.3",
},
},
{
name: "Unknown MIB",
oid: ".1.999",
expected: MibEntry{OidText: "iso.999"},
},
}
// Load the MIBs
getGosmiTr(t)
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Run the actual test
actual, err := TrapLookup(tt.oid)
require.NoError(t, err)
require.Equal(t, tt.expected, actual)
})
}
}
func TestTrapLookupFail(t *testing.T) {
tests := []struct {
name string
oid string
expected string
}{
{
name: "New top level OID",
oid: ".3.6.1.3.0",
expected: "Could not find node for OID 3.6.1.3.0",
},
{
name: "Malformed OID",
oid: ".1.3.dod.1.3.0",
expected: "could not convert OID .1.3.dod.1.3.0: strconv.ParseUint: parsing \"dod\": invalid syntax",
},
}
// Load the MIBs
getGosmiTr(t)
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Run the actual test
_, err := TrapLookup(tt.oid)
require.EqualError(t, err, tt.expected)
})
}
}

View file

@ -0,0 +1,272 @@
package snmp
import (
"bufio"
"bytes"
"errors"
"fmt"
"os/exec"
"strings"
"sync"
"github.com/influxdata/telegraf"
)
// struct that implements the translator interface. This calls existing
// code to exec netsnmp's snmptranslate program
type netsnmpTranslator struct {
log telegraf.Logger
}
func NewNetsnmpTranslator(log telegraf.Logger) *netsnmpTranslator {
return &netsnmpTranslator{log: log}
}
type snmpTableCache struct {
mibName string
oidNum string
oidText string
fields []Field
err error
}
// execCommand is so tests can mock out exec.Command usage.
var execCommand = exec.Command
// execCmd executes the specified command, returning the STDOUT content.
// If command exits with error status, the output is captured into the returned error.
func (n *netsnmpTranslator) execCmd(arg0 string, args ...string) ([]byte, error) {
quoted := make([]string, 0, len(args))
for _, arg := range args {
quoted = append(quoted, fmt.Sprintf("%q", arg))
}
n.log.Debugf("executing %q %s", arg0, strings.Join(quoted, " "))
out, err := execCommand(arg0, args...).Output()
if err != nil {
var exitErr *exec.ExitError
if errors.As(err, &exitErr) {
return nil, fmt.Errorf("%s: %w", bytes.TrimRight(exitErr.Stderr, "\r\n"), err)
}
return nil, err
}
return out, nil
}
var snmpTableCaches map[string]snmpTableCache
var snmpTableCachesLock sync.Mutex
// snmpTable resolves the given OID as a table, providing information about the
// table and fields within.
//
//nolint:revive //function-result-limit conditionally 5 return results allowed
func (n *netsnmpTranslator) SnmpTable(oid string) (
mibName string, oidNum string, oidText string,
fields []Field,
err error) {
snmpTableCachesLock.Lock()
if snmpTableCaches == nil {
snmpTableCaches = map[string]snmpTableCache{}
}
var stc snmpTableCache
var ok bool
if stc, ok = snmpTableCaches[oid]; !ok {
stc.mibName, stc.oidNum, stc.oidText, stc.fields, stc.err = n.snmpTableCall(oid)
snmpTableCaches[oid] = stc
}
snmpTableCachesLock.Unlock()
return stc.mibName, stc.oidNum, stc.oidText, stc.fields, stc.err
}
//nolint:revive //function-result-limit conditionally 5 return results allowed
func (n *netsnmpTranslator) snmpTableCall(oid string) (
mibName string, oidNum string, oidText string,
fields []Field,
err error) {
mibName, oidNum, oidText, _, err = n.SnmpTranslate(oid)
if err != nil {
return "", "", "", nil, fmt.Errorf("translating: %w", err)
}
mibPrefix := mibName + "::"
oidFullName := mibPrefix + oidText
// first attempt to get the table's tags
tagOids := map[string]struct{}{}
// We have to guess that the "entry" oid is `oid+".1"`. snmptable and snmptranslate don't seem to have a way to provide the info.
if out, err := n.execCmd("snmptranslate", "-Td", oidFullName+".1"); err == nil {
scanner := bufio.NewScanner(bytes.NewBuffer(out))
for scanner.Scan() {
line := scanner.Text()
if !strings.HasPrefix(line, " INDEX") {
continue
}
i := strings.Index(line, "{ ")
if i == -1 { // parse error
continue
}
line = line[i+2:]
i = strings.Index(line, " }")
if i == -1 { // parse error
continue
}
line = line[:i]
for _, col := range strings.Split(line, ", ") {
tagOids[mibPrefix+col] = struct{}{}
}
}
}
// this won't actually try to run a query. The `-Ch` will just cause it to dump headers.
out, err := n.execCmd("snmptable", "-Ch", "-Cl", "-c", "public", "127.0.0.1", oidFullName)
if err != nil {
return "", "", "", nil, fmt.Errorf("getting table columns: %w", err)
}
scanner := bufio.NewScanner(bytes.NewBuffer(out))
scanner.Scan()
cols := scanner.Text()
if len(cols) == 0 {
return "", "", "", nil, errors.New("could not find any columns in table")
}
for _, col := range strings.Split(cols, " ") {
if len(col) == 0 {
continue
}
_, isTag := tagOids[mibPrefix+col]
fields = append(fields, Field{Name: col, Oid: mibPrefix + col, IsTag: isTag})
}
return mibName, oidNum, oidText, fields, err
}
type snmpTranslateCache struct {
mibName string
oidNum string
oidText string
conversion string
err error
}
var snmpTranslateCachesLock sync.Mutex
var snmpTranslateCaches map[string]snmpTranslateCache
// snmpTranslate resolves the given OID.
//
//nolint:revive //function-result-limit conditionally 5 return results allowed
func (n *netsnmpTranslator) SnmpTranslate(oid string) (
mibName string, oidNum string, oidText string,
conversion string,
err error) {
snmpTranslateCachesLock.Lock()
if snmpTranslateCaches == nil {
snmpTranslateCaches = map[string]snmpTranslateCache{}
}
var stc snmpTranslateCache
var ok bool
if stc, ok = snmpTranslateCaches[oid]; !ok {
// This will result in only one call to snmptranslate running at a time.
// We could speed it up by putting a lock in snmpTranslateCache and then
// returning it immediately, and multiple callers would then release the
// snmpTranslateCachesLock and instead wait on the individual
// snmpTranslation.Lock to release. But I don't know that the extra complexity
// is worth it. Especially when it would slam the system pretty hard if lots
// of lookups are being performed.
stc.mibName, stc.oidNum, stc.oidText, stc.conversion, stc.err = n.snmpTranslateCall(oid)
snmpTranslateCaches[oid] = stc
}
snmpTranslateCachesLock.Unlock()
return stc.mibName, stc.oidNum, stc.oidText, stc.conversion, stc.err
}
//nolint:revive //function-result-limit conditionally 5 return results allowed
func (n *netsnmpTranslator) snmpTranslateCall(oid string) (mibName string, oidNum string, oidText string, conversion string, err error) {
var out []byte
if strings.ContainsAny(oid, ":abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") {
out, err = n.execCmd("snmptranslate", "-Td", "-Ob", oid)
} else {
out, err = n.execCmd("snmptranslate", "-Td", "-Ob", "-m", "all", oid)
var execErr *exec.Error
if errors.As(err, &execErr) && errors.Is(execErr, exec.ErrNotFound) {
// Silently discard error if snmptranslate not found and we have a numeric OID.
// Meaning we can get by without the lookup.
return "", oid, oid, "", nil
}
}
if err != nil {
return "", "", "", "", err
}
scanner := bufio.NewScanner(bytes.NewBuffer(out))
ok := scanner.Scan()
if !ok && scanner.Err() != nil {
return "", "", "", "", fmt.Errorf("getting OID text: %w", scanner.Err())
}
oidText = scanner.Text()
i := strings.Index(oidText, "::")
if i == -1 {
// was not found in MIB.
if bytes.Contains(out, []byte("[TRUNCATED]")) {
return "", oid, oid, "", nil
}
// not truncated, but not fully found. We still need to parse out numeric OID, so keep going
oidText = oid
} else {
mibName = oidText[:i]
oidText = oidText[i+2:]
}
for scanner.Scan() {
line := scanner.Text()
if strings.HasPrefix(line, " -- TEXTUAL CONVENTION ") {
tc := strings.TrimPrefix(line, " -- TEXTUAL CONVENTION ")
switch tc {
case "MacAddress", "PhysAddress":
conversion = "hwaddr"
case "InetAddressIPv4", "InetAddressIPv6", "InetAddress", "IPSIpAddress":
conversion = "ipaddr"
}
} else if strings.HasPrefix(line, "::= { ") {
objs := strings.TrimPrefix(line, "::= { ")
objs = strings.TrimSuffix(objs, " }")
for _, obj := range strings.Split(objs, " ") {
if len(obj) == 0 {
continue
}
if i := strings.Index(obj, "("); i != -1 {
obj = obj[i+1:]
if j := strings.Index(obj, ")"); j != -1 {
oidNum += "." + obj[:j]
} else {
return "", "", "", "", fmt.Errorf("getting OID number from: %s", obj)
}
} else {
oidNum += "." + obj
}
}
break
}
}
return mibName, oidNum, oidText, conversion, nil
}
func (*netsnmpTranslator) SnmpFormatEnum(string, interface{}, bool) (string, error) {
return "", errors.New("not implemented in netsnmp translator")
}
func (*netsnmpTranslator) SnmpFormatDisplayHint(string, interface{}) (string, error) {
return "", errors.New("not implemented in netsnmp translator")
}

View file

@ -0,0 +1,102 @@
//go:build generate
package main
import (
"bufio"
"bytes"
"fmt"
"os"
"os/exec"
"strings"
)
// This file is a generator used to generate the mocks for the commands used by the tests.
// These are the commands to be mocked.
var mockedCommands = [][]string{
{"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0"},
{"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.1.1"},
{"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.1.2"},
{"snmptranslate", "-Td", "-Ob", "-m", "all", "1.0.0.1.1"},
{"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0.1.1"},
{"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0.1.1.0"},
{"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0.1.5"},
{"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.2.3"},
{"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0.1.7"},
{"snmptranslate", "-Td", "-Ob", ".iso.2.3"},
{"snmptranslate", "-Td", "-Ob", "-m", "all", ".999"},
{"snmptranslate", "-Td", "-Ob", "TEST::server"},
{"snmptranslate", "-Td", "-Ob", "TEST::server.0"},
{"snmptranslate", "-Td", "-Ob", "TEST::testTable"},
{"snmptranslate", "-Td", "-Ob", "TEST::connections"},
{"snmptranslate", "-Td", "-Ob", "TEST::latency"},
{"snmptranslate", "-Td", "-Ob", "TEST::description"},
{"snmptranslate", "-Td", "-Ob", "TEST::hostname"},
{"snmptranslate", "-Td", "-Ob", "IF-MIB::ifPhysAddress.1"},
{"snmptranslate", "-Td", "-Ob", "BRIDGE-MIB::dot1dTpFdbAddress.1"},
{"snmptranslate", "-Td", "-Ob", "TCP-MIB::tcpConnectionLocalAddress.1"},
{"snmptranslate", "-Td", "TEST::testTable.1"},
{"snmptable", "-Ch", "-Cl", "-c", "public", "127.0.0.1", "TEST::testTable"},
}
type mockedCommandResult struct {
stdout string
stderr string
exitError bool
}
func main() {
if err := generate(); err != nil {
fmt.Fprintf(os.Stderr, "error: %s\n", err)
os.Exit(1)
}
}
func generate() error {
f, err := os.OpenFile("snmp_mocks_test.go", os.O_RDWR, 0644)
if err != nil {
return err
}
br := bufio.NewReader(f)
var i int64
for l, err := br.ReadString('\n'); err == nil; l, err = br.ReadString('\n') {
i += int64(len(l))
if l == "// BEGIN GO GENERATE CONTENT\n" {
break
}
}
f.Truncate(i)
f.Seek(i, 0)
fmt.Fprintf(f, "var mockedCommandResults = map[string]mockedCommandResult{\n")
for _, cmd := range mockedCommands {
ec := exec.Command(cmd[0], cmd[1:]...)
out := bytes.NewBuffer(nil)
err := bytes.NewBuffer(nil)
ec.Stdout = out
ec.Stderr = err
ec.Env = []string{
"MIBDIRS=+./testdata",
}
var mcr mockedCommandResult
if err := ec.Run(); err != nil {
if err, ok := err.(*exec.ExitError); !ok {
mcr.exitError = true
} else {
return fmt.Errorf("executing %v: %s", cmd, err)
}
}
mcr.stdout = string(out.Bytes())
mcr.stderr = string(err.Bytes())
cmd0 := strings.Join(cmd, "\000")
mcrv := fmt.Sprintf("%#v", mcr)[5:] // trim `main.` prefix
fmt.Fprintf(f, "%#v: %s,\n", cmd0, mcrv)
}
f.Write([]byte("}\n"))
f.Close()
return exec.Command("gofmt", "-w", "translator_netsnmp_mocks_test.go").Run()
}

View file

@ -0,0 +1,211 @@
package snmp
import (
"fmt"
"os"
"os/exec"
"strings"
"testing"
)
type mockedCommandResult struct {
stdout string
stderr string
exitError bool
}
func mockExecCommand(arg0 string, args ...string) *exec.Cmd {
args = append([]string{"-test.run=TestMockExecCommand", "--", arg0}, args...)
cmd := exec.Command(os.Args[0], args...)
cmd.Stderr = os.Stderr // so the test output shows errors
return cmd
}
// This is not a real test. This is just a way of mocking out commands.
//
// Idea based on https://github.com/golang/go/blob/7c31043/src/os/exec/exec_test.go#L568
func TestMockExecCommand(_ *testing.T) {
var cmd []string //nolint:prealloc // Pre-allocated this slice would break the algorithm
for _, arg := range os.Args {
if arg == "--" {
cmd = make([]string, 0)
continue
}
if cmd == nil {
continue
}
cmd = append(cmd, arg)
}
if cmd == nil {
return
}
cmd0 := strings.Join(cmd, "\000")
mcr, ok := mockedCommandResults[cmd0]
if !ok {
cv := fmt.Sprintf("%#v", cmd)[8:] // trim `[]string` prefix
fmt.Fprintf(
os.Stderr,
"Unmocked command. Please add the following to `mockedCommands` in snmp_mocks_generate.go, and then run `go generate`:\n\t%s,\n",
cv,
)
//nolint:revive // error code is important for this "test"
os.Exit(1)
}
fmt.Printf("%s", mcr.stdout)
fmt.Fprintf(os.Stderr, "%s", mcr.stderr)
if mcr.exitError {
//nolint:revive // error code is important for this "test"
os.Exit(1)
}
//nolint:revive // error code is important for this "test"
os.Exit(0)
}
func init() {
execCommand = mockExecCommand
}
// BEGIN GO GENERATE CONTENT
var mockedCommandResults = map[string]mockedCommandResult{
"snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0": {
stdout: "TEST::testTable\ntestTable OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 0 }\n",
stderr: "",
exitError: false,
},
"snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.1.1": {
stdout: "TEST::hostname\nhostname OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n " +
"STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 1 1 }\n",
stderr: "",
exitError: false,
},
"snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.1.2": {
stdout: "TEST::1.2\nanonymous#1 OBJECT-TYPE\n -- FROM\tTEST\n::= { iso(1) 0 testOID(0) 1 2 }\n",
stderr: "",
exitError: false,
},
"snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x001.0.0.1.1": {
stdout: "TEST::hostname\nhostname OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n " +
"STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 1 1 }\n",
stderr: "",
exitError: false,
},
"snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.1": {
stdout: "TEST::server\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n " +
"STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 1 }\n",
stderr: "",
exitError: false,
},
"snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.1.0": {
stdout: "TEST::server.0\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n " +
"STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) server(1) 0 }\n",
stderr: "",
exitError: false,
},
"snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.5": {
stdout: "TEST::testTableEntry.5\ntestTableEntry OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n " +
"STATUS\tcurrent\n INDEX\t\t{ server }\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 5 }\n",
stderr: "",
exitError: false,
},
"snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.2.3": {
stdout: "iso.2.3\niso OBJECT-TYPE\n -- FROM\t#-1\n::= { iso(1) 2 3 }\n",
stderr: "",
exitError: false,
},
"snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.7": {
stdout: "TEST::testTableEntry.7\ntestTableEntry OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n " +
"STATUS\tcurrent\n INDEX\t\t{ server }\n::= { iso(1) std(0) testOID(0) testTable(0) testTableEntry(1) 7 }\n",
stderr: "",
exitError: false,
},
"snmptranslate\x00-Td\x00-Ob\x00.iso.2.3": {
stdout: "iso.2.3\niso OBJECT-TYPE\n -- FROM\t#-1\n::= { iso(1) 2 3 }\n",
stderr: "",
exitError: false,
},
"snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.999": {stdout: ".999\n [TRUNCATED]\n", stderr: "", exitError: false},
"snmptranslate\x00-Td\x00-Ob\x00TEST::server": {
stdout: "TEST::server\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n " +
"STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 1 }\n",
stderr: "",
exitError: false,
},
"snmptranslate\x00-Td\x00-Ob\x00TEST::server.0": {
stdout: "TEST::server.0\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n " +
"STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) server(1) 0 }\n",
stderr: "",
exitError: false,
},
"snmptranslate\x00-Td\x00-Ob\x00TEST::testTable": {
stdout: "TEST::testTable\ntestTable OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 0 }\n",
stderr: "",
exitError: false,
},
"snmptranslate\x00-Td\x00-Ob\x00TEST::connections": {
stdout: "TEST::connections\nconnections OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tINTEGER\n MAX-ACCESS\tread-only\n " +
"STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 2 }\n",
stderr: "",
exitError: false,
},
"snmptranslate\x00-Td\x00-Ob\x00TEST::latency": {
stdout: "TEST::latency\nlatency OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n " +
"STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 3 }\n",
stderr: "",
exitError: false,
},
"snmptranslate\x00-Td\x00-Ob\x00TEST::description": {
stdout: "TEST::description\ndescription OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n " +
"STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 4 }\n",
stderr: "",
exitError: false,
},
"snmptranslate\x00-Td\x00-Ob\x00TEST::hostname": {
stdout: "TEST::hostname\nhostname OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n " +
"STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 1 1 }\n",
stderr: "",
exitError: false,
},
"snmptranslate\x00-Td\x00-Ob\x00IF-MIB::ifPhysAddress.1": {
stdout: "IF-MIB::ifPhysAddress.1\nifPhysAddress OBJECT-TYPE\n -- FROM\tIF-MIB\n -- TEXTUAL CONVENTION PhysAddress\n SYNTAX\tOCTET STRING\n " +
"DISPLAY-HINT\t\"1x:\"\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n DESCRIPTION\t\"The interface's address at its protocol sub-layer. " +
"For\n example, for an 802.x interface, this object normally\n contains a MAC address. " +
"The interface's media-specific MIB\n must define the bit and byte ordering and the format of the\n " +
"value of this object. For interfaces which do not have such\n an address (e.g., a serial line), " +
"this object should contain\n an octet string of zero length.\"\n::= " +
"{ iso(1) org(3) dod(6) internet(1) mgmt(2) mib-2(1) interfaces(2) ifTable(2) ifEntry(1) ifPhysAddress(6) 1 }\n",
stderr: "",
exitError: false,
},
"snmptranslate\x00-Td\x00-Ob\x00BRIDGE-MIB::dot1dTpFdbAddress.1": {
stdout: "BRIDGE-MIB::dot1dTpFdbAddress.1\ndot1dTpFdbAddress OBJECT-TYPE\n -- FROM\tBRIDGE-MIB\n -- TEXTUAL CONVENTION MacAddress\n " +
"SYNTAX\tOCTET STRING (6) \n DISPLAY-HINT\t\"1x:\"\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n DESCRIPTION\t\"" +
"A unicast MAC address for which the bridge has\n forwarding and/or filtering information.\"\n::= " +
"{ iso(1) org(3) dod(6) internet(1) mgmt(2) mib-2(1) dot1dBridge(17) dot1dTp(4) dot1dTpFdbTable(3) dot1dTpFdbEntry(1) dot1dTpFdbAddress(1) 1 }\n",
stderr: "",
exitError: false,
},
"snmptranslate\x00-Td\x00-Ob\x00TCP-MIB::tcpConnectionLocalAddress.1": {
stdout: "TCP-MIB::tcpConnectionLocalAddress.1\ntcpConnectionLocalAddress OBJECT-TYPE\n -- FROM\tTCP-MIB\n -- " +
"TEXTUAL CONVENTION InetAddress\n SYNTAX\tOCTET STRING (0..255) \n MAX-ACCESS\tnot-accessible\n " +
"STATUS\tcurrent\n DESCRIPTION\t\"The local IP address for this TCP connection. " +
"The type\n of this address is determined by the value of\n tcpConnectionLocalAddressType.\n\n " +
"As this object is used in the index for the\n tcpConnectionTable, implementors should be\n " +
"careful not to create entries that would result in OIDs\n with more than 128 subidentifiers; " +
"otherwise the information\n cannot be accessed by using SNMPv1, SNMPv2c, or SNMPv3.\"\n" +
"::= { iso(1) org(3) dod(6) internet(1) mgmt(2) mib-2(1) tcp(6) tcpConnectionTable(19) tcpConnectionEntry(1) tcpConnectionLocalAddress(2) 1 }\n",
stderr: "",
exitError: false,
},
"snmptranslate\x00-Td\x00TEST::testTable.1": {
stdout: "TEST::testTableEntry\ntestTableEntry OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n " +
"STATUS\tcurrent\n INDEX\t\t{ server }\n::= { iso(1) 0 testOID(0) testTable(0) 1 }\n",
stderr: "",
exitError: false,
},
"snmptable\x00-Ch\x00-Cl\x00-c\x00public\x00127.0.0.1\x00TEST::testTable": {
stdout: "server connections latency description \nTEST::testTable: No entries\n",
stderr: "",
exitError: false,
},
}

View file

@ -0,0 +1,291 @@
//go:generate go run -tags generate translator_netsnmp_mocks_generate.go
package snmp
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf/testutil"
)
func TestFieldInit(t *testing.T) {
translations := []struct {
inputOid string
inputName string
inputConversion string
expectedOid string
expectedName string
expectedConversion string
}{
{".1.2.3", "foo", "", ".1.2.3", "foo", ""},
{".iso.2.3", "foo", "", ".1.2.3", "foo", ""},
{".1.0.0.0.1.1", "", "", ".1.0.0.0.1.1", "server", ""},
{".1.0.0.0.1.1.0", "", "", ".1.0.0.0.1.1.0", "server.0", ""},
{".999", "", "", ".999", ".999", ""},
{"TEST::server", "", "", ".1.0.0.0.1.1", "server", ""},
{"TEST::server.0", "", "", ".1.0.0.0.1.1.0", "server.0", ""},
{"TEST::server", "foo", "", ".1.0.0.0.1.1", "foo", ""},
{"IF-MIB::ifPhysAddress.1", "", "", ".1.3.6.1.2.1.2.2.1.6.1", "ifPhysAddress.1", "hwaddr"},
{"IF-MIB::ifPhysAddress.1", "", "none", ".1.3.6.1.2.1.2.2.1.6.1", "ifPhysAddress.1", "none"},
{"BRIDGE-MIB::dot1dTpFdbAddress.1", "", "", ".1.3.6.1.2.1.17.4.3.1.1.1", "dot1dTpFdbAddress.1", "hwaddr"},
{"TCP-MIB::tcpConnectionLocalAddress.1", "", "", ".1.3.6.1.2.1.6.19.1.2.1", "tcpConnectionLocalAddress.1", "ipaddr"},
}
tr := NewNetsnmpTranslator(testutil.Logger{})
for _, txl := range translations {
f := Field{Oid: txl.inputOid, Name: txl.inputName, Conversion: txl.inputConversion}
err := f.Init(tr)
require.NoError(t, err, "inputOid=%q inputName=%q", txl.inputOid, txl.inputName)
require.Equal(t, txl.expectedOid, f.Oid, "inputOid=%q inputName=%q inputConversion=%q", txl.inputOid, txl.inputName, txl.inputConversion)
require.Equal(t, txl.expectedName, f.Name, "inputOid=%q inputName=%q inputConversion=%q", txl.inputOid, txl.inputName, txl.inputConversion)
}
}
func TestTableInit(t *testing.T) {
tbl := Table{
Oid: ".1.0.0.0",
Fields: []Field{
{Oid: ".999", Name: "foo"},
{Oid: "TEST::description", Name: "description", IsTag: true},
},
}
err := tbl.Init(NewNetsnmpTranslator(testutil.Logger{}))
require.NoError(t, err)
require.Equal(t, "testTable", tbl.Name)
require.Len(t, tbl.Fields, 5)
require.Equal(t, ".999", tbl.Fields[0].Oid)
require.Equal(t, "foo", tbl.Fields[0].Name)
require.False(t, tbl.Fields[0].IsTag)
require.Empty(t, tbl.Fields[0].Conversion)
require.Equal(t, ".1.0.0.0.1.1", tbl.Fields[2].Oid)
require.Equal(t, "server", tbl.Fields[2].Name)
require.True(t, tbl.Fields[1].IsTag)
require.Empty(t, tbl.Fields[1].Conversion)
require.Equal(t, ".1.0.0.0.1.2", tbl.Fields[3].Oid)
require.Equal(t, "connections", tbl.Fields[3].Name)
require.False(t, tbl.Fields[3].IsTag)
require.Empty(t, tbl.Fields[3].Conversion)
require.Equal(t, ".1.0.0.0.1.3", tbl.Fields[4].Oid)
require.Equal(t, "latency", tbl.Fields[4].Name)
require.False(t, tbl.Fields[4].IsTag)
require.Empty(t, tbl.Fields[4].Conversion)
require.Equal(t, ".1.0.0.0.1.4", tbl.Fields[1].Oid)
require.Equal(t, "description", tbl.Fields[1].Name)
require.True(t, tbl.Fields[1].IsTag)
require.Empty(t, tbl.Fields[1].Conversion)
}
func TestTableBuild_walk(t *testing.T) {
tbl := Table{
Name: "mytable",
IndexAsTag: true,
Fields: []Field{
{
Name: "myfield1",
Oid: ".1.0.0.0.1.1",
IsTag: true,
},
{
Name: "myfield2",
Oid: ".1.0.0.0.1.2",
},
{
Name: "myfield3",
Oid: ".1.0.0.0.1.3",
Conversion: "float",
},
{
Name: "myfield4",
Oid: ".1.0.0.2.1.5",
OidIndexSuffix: ".9.9",
},
{
Name: "myfield5",
Oid: ".1.0.0.2.1.5",
OidIndexLength: 1,
},
{
Name: "myfield6",
Oid: ".1.0.0.0.1.6",
Translate: true,
},
{
Name: "myfield7",
Oid: ".1.0.0.0.1.6",
Translate: false,
},
},
}
require.NoError(t, tbl.Init(NewNetsnmpTranslator(testutil.Logger{})))
tb, err := tbl.Build(tsc, true)
require.NoError(t, err)
require.Equal(t, "mytable", tb.Name)
rtr1 := RTableRow{
Tags: map[string]string{
"myfield1": "foo",
"index": "0",
},
Fields: map[string]interface{}{
"myfield2": 1,
"myfield3": float64(0.123),
"myfield4": 11,
"myfield5": 11,
"myfield6": "testTableEntry.7",
"myfield7": ".1.0.0.0.1.7",
},
}
rtr2 := RTableRow{
Tags: map[string]string{
"myfield1": "bar",
"index": "1",
},
Fields: map[string]interface{}{
"myfield2": 2,
"myfield3": float64(0.456),
"myfield4": 22,
"myfield5": 22,
},
}
rtr3 := RTableRow{
Tags: map[string]string{
"index": "2",
},
Fields: map[string]interface{}{
"myfield2": 0,
"myfield3": float64(0.0),
},
}
rtr4 := RTableRow{
Tags: map[string]string{
"index": "3",
},
Fields: map[string]interface{}{
"myfield3": float64(9.999),
},
}
require.Len(t, tb.Rows, 4)
require.Contains(t, tb.Rows, rtr1)
require.Contains(t, tb.Rows, rtr2)
require.Contains(t, tb.Rows, rtr3)
require.Contains(t, tb.Rows, rtr4)
}
func TestTableBuild_noWalk(t *testing.T) {
tbl := Table{
Name: "mytable",
Fields: []Field{
{
Name: "myfield1",
Oid: ".1.0.0.1.1",
IsTag: true,
},
{
Name: "myfield2",
Oid: ".1.0.0.1.2",
},
{
Name: "myfield3",
Oid: ".1.0.0.1.2",
IsTag: true,
},
{
Name: "empty",
Oid: ".1.0.0.0.1.1.2",
},
{
Name: "noexist",
Oid: ".1.2.3.4.5",
},
{
Name: "myfield4",
Oid: ".1.0.0.0.1.6.0",
Translate: true,
},
},
}
require.NoError(t, tbl.Init(NewNetsnmpTranslator(testutil.Logger{})))
tb, err := tbl.Build(tsc, false)
require.NoError(t, err)
rtr := RTableRow{
Tags: map[string]string{"myfield1": "baz", "myfield3": "234"},
Fields: map[string]interface{}{"myfield2": 234, "myfield4": "testTableEntry.7"},
}
require.Len(t, tb.Rows, 1)
require.Contains(t, tb.Rows, rtr)
}
func TestSnmpTranslateCache_miss(t *testing.T) {
snmpTranslateCaches = nil
oid := "IF-MIB::ifPhysAddress.1"
mibName, oidNum, oidText, conversion, err := NewNetsnmpTranslator(testutil.Logger{}).SnmpTranslate(oid)
require.Len(t, snmpTranslateCaches, 1)
stc := snmpTranslateCaches[oid]
require.NotNil(t, stc)
require.Equal(t, mibName, stc.mibName)
require.Equal(t, oidNum, stc.oidNum)
require.Equal(t, oidText, stc.oidText)
require.Equal(t, conversion, stc.conversion)
require.Equal(t, err, stc.err)
}
func TestSnmpTranslateCache_hit(t *testing.T) {
snmpTranslateCaches = map[string]snmpTranslateCache{
"foo": {
mibName: "a",
oidNum: "b",
oidText: "c",
conversion: "d",
},
}
mibName, oidNum, oidText, conversion, err := NewNetsnmpTranslator(testutil.Logger{}).SnmpTranslate("foo")
require.Equal(t, "a", mibName)
require.Equal(t, "b", oidNum)
require.Equal(t, "c", oidText)
require.Equal(t, "d", conversion)
require.NoError(t, err)
snmpTranslateCaches = nil
}
func TestSnmpTableCache_miss(t *testing.T) {
snmpTableCaches = nil
oid := ".1.0.0.0"
mibName, oidNum, oidText, fields, err := NewNetsnmpTranslator(testutil.Logger{}).SnmpTable(oid)
require.Len(t, snmpTableCaches, 1)
stc := snmpTableCaches[oid]
require.NotNil(t, stc)
require.Equal(t, mibName, stc.mibName)
require.Equal(t, oidNum, stc.oidNum)
require.Equal(t, oidText, stc.oidText)
require.Equal(t, fields, stc.fields)
require.Equal(t, err, stc.err)
}
func TestSnmpTableCache_hit(t *testing.T) {
snmpTableCaches = map[string]snmpTableCache{
"foo": {
mibName: "a",
oidNum: "b",
oidText: "c",
fields: []Field{{Name: "d"}},
},
}
mibName, oidNum, oidText, fields, err := NewNetsnmpTranslator(testutil.Logger{}).SnmpTable("foo")
require.Equal(t, "a", mibName)
require.Equal(t, "b", oidNum)
require.Equal(t, "c", oidText)
require.Equal(t, []Field{{Name: "d"}}, fields)
require.NoError(t, err)
}

201
internal/snmp/wrapper.go Normal file
View file

@ -0,0 +1,201 @@
package snmp
import (
"errors"
"fmt"
"net/url"
"strconv"
"strings"
"time"
"github.com/gosnmp/gosnmp"
)
// Connection is an interface which wraps a *gosnmp.GoSNMP object.
// We interact through an interface so we can mock it out in tests.
type Connection interface {
Host() string
// BulkWalkAll(string) ([]gosnmp.SnmpPDU, error)
Walk(string, gosnmp.WalkFunc) error
Get(oids []string) (*gosnmp.SnmpPacket, error)
Reconnect() error
}
// GosnmpWrapper wraps a *gosnmp.GoSNMP object so we can use it as a snmpConnection.
type GosnmpWrapper struct {
*gosnmp.GoSNMP
}
// Host returns the value of GoSNMP.Target.
func (gs GosnmpWrapper) Host() string {
return gs.Target
}
// Walk wraps GoSNMP.Walk() or GoSNMP.BulkWalk(), depending on whether the
// connection is using SNMPv1 or newer.
func (gs GosnmpWrapper) Walk(oid string, fn gosnmp.WalkFunc) error {
if gs.Version == gosnmp.Version1 {
return gs.GoSNMP.Walk(oid, fn)
}
return gs.GoSNMP.BulkWalk(oid, fn)
}
func NewWrapper(s ClientConfig) (GosnmpWrapper, error) {
gs := GosnmpWrapper{&gosnmp.GoSNMP{}}
gs.Timeout = time.Duration(s.Timeout)
gs.Retries = s.Retries
gs.UseUnconnectedUDPSocket = s.UnconnectedUDPSocket
switch s.Version {
case 3:
gs.Version = gosnmp.Version3
case 2, 0:
gs.Version = gosnmp.Version2c
case 1:
gs.Version = gosnmp.Version1
default:
return GosnmpWrapper{}, errors.New("invalid version")
}
if s.Version < 3 {
if s.Community == "" {
gs.Community = "public"
} else {
gs.Community = s.Community
}
}
gs.MaxRepetitions = s.MaxRepetitions
if s.Version == 3 {
gs.ContextName = s.ContextName
sp := &gosnmp.UsmSecurityParameters{}
gs.SecurityParameters = sp
gs.SecurityModel = gosnmp.UserSecurityModel
switch strings.ToLower(s.SecLevel) {
case "noauthnopriv", "":
gs.MsgFlags = gosnmp.NoAuthNoPriv
case "authnopriv":
gs.MsgFlags = gosnmp.AuthNoPriv
case "authpriv":
gs.MsgFlags = gosnmp.AuthPriv
default:
return GosnmpWrapper{}, errors.New("invalid secLevel")
}
sp.UserName = s.SecName
switch strings.ToLower(s.AuthProtocol) {
case "md5":
sp.AuthenticationProtocol = gosnmp.MD5
case "sha":
sp.AuthenticationProtocol = gosnmp.SHA
case "sha224":
sp.AuthenticationProtocol = gosnmp.SHA224
case "sha256":
sp.AuthenticationProtocol = gosnmp.SHA256
case "sha384":
sp.AuthenticationProtocol = gosnmp.SHA384
case "sha512":
sp.AuthenticationProtocol = gosnmp.SHA512
case "":
sp.AuthenticationProtocol = gosnmp.NoAuth
default:
return GosnmpWrapper{}, errors.New("invalid authProtocol")
}
if !s.AuthPassword.Empty() {
p, err := s.AuthPassword.Get()
if err != nil {
return GosnmpWrapper{}, fmt.Errorf("getting authentication password failed: %w", err)
}
sp.AuthenticationPassphrase = p.String()
p.Destroy()
}
switch strings.ToLower(s.PrivProtocol) {
case "des":
sp.PrivacyProtocol = gosnmp.DES
case "aes":
sp.PrivacyProtocol = gosnmp.AES
case "aes192":
sp.PrivacyProtocol = gosnmp.AES192
case "aes192c":
sp.PrivacyProtocol = gosnmp.AES192C
case "aes256":
sp.PrivacyProtocol = gosnmp.AES256
case "aes256c":
sp.PrivacyProtocol = gosnmp.AES256C
case "":
sp.PrivacyProtocol = gosnmp.NoPriv
default:
return GosnmpWrapper{}, errors.New("invalid privProtocol")
}
if !s.PrivPassword.Empty() {
p, err := s.PrivPassword.Get()
if err != nil {
return GosnmpWrapper{}, fmt.Errorf("getting private password failed: %w", err)
}
sp.PrivacyPassphrase = p.String()
p.Destroy()
}
sp.AuthoritativeEngineID = s.EngineID
sp.AuthoritativeEngineBoots = s.EngineBoots
sp.AuthoritativeEngineTime = s.EngineTime
}
return gs, nil
}
// SetAgent takes a url (scheme://host:port) and sets the wrapped
// GoSNMP struct's corresponding fields. This shouldn't be called
// after using the wrapped GoSNMP struct, for example after
// connecting.
func (gs *GosnmpWrapper) SetAgent(agent string) error {
if !strings.Contains(agent, "://") {
agent = "udp://" + agent
}
u, err := url.Parse(agent)
if err != nil {
return err
}
// Only allow udp{4,6} and tcp{4,6}.
// Allowing ip{4,6} does not make sense as specifying a port
// requires the specification of a protocol.
// gosnmp does not handle these errors well, which is why
// they can result in cryptic errors by net.Dial.
switch u.Scheme {
case "tcp", "tcp4", "tcp6", "udp", "udp4", "udp6":
gs.Transport = u.Scheme
default:
return fmt.Errorf("unsupported scheme: %v", u.Scheme)
}
gs.Target = u.Hostname()
portStr := u.Port()
if portStr == "" {
portStr = "161"
}
port, err := strconv.ParseUint(portStr, 10, 16)
if err != nil {
return fmt.Errorf("parsing port: %w", err)
}
gs.Port = uint16(port)
return nil
}
func (gs GosnmpWrapper) Reconnect() error {
if gs.Conn == nil {
return gs.Connect()
}
return nil
}

View file

@ -0,0 +1,89 @@
package snmp
import "github.com/gosnmp/gosnmp"
type testSNMPConnection struct {
host string
values map[string]interface{}
}
func (tsc *testSNMPConnection) Host() string {
return tsc.host
}
func (tsc *testSNMPConnection) Get(oids []string) (*gosnmp.SnmpPacket, error) {
sp := &gosnmp.SnmpPacket{}
for _, oid := range oids {
v, ok := tsc.values[oid]
if !ok {
sp.Variables = append(sp.Variables, gosnmp.SnmpPDU{
Name: oid,
Type: gosnmp.NoSuchObject,
})
continue
}
sp.Variables = append(sp.Variables, gosnmp.SnmpPDU{
Name: oid,
Value: v,
})
}
return sp, nil
}
func (tsc *testSNMPConnection) Walk(oid string, wf gosnmp.WalkFunc) error {
for void, v := range tsc.values {
if void == oid || (len(void) > len(oid) && void[:len(oid)+1] == oid+".") {
if err := wf(gosnmp.SnmpPDU{
Name: void,
Value: v,
}); err != nil {
return err
}
}
}
return nil
}
func (*testSNMPConnection) Reconnect() error {
return nil
}
var tsc = &testSNMPConnection{
host: "tsc",
values: map[string]interface{}{
".1.0.0.0.1.1.0": "foo",
".1.0.0.0.1.1.1": []byte("bar"),
".1.0.0.0.1.1.2": []byte(""),
".1.0.0.0.1.102": "bad",
".1.0.0.0.1.2.0": 1,
".1.0.0.0.1.2.1": 2,
".1.0.0.0.1.2.2": 0,
".1.0.0.0.1.3.0": "0.123",
".1.0.0.0.1.3.1": "0.456",
".1.0.0.0.1.3.2": "0.000",
".1.0.0.0.1.3.3": "9.999",
".1.0.0.0.1.5.0": 123456,
".1.0.0.0.1.6.0": ".1.0.0.0.1.7",
".1.0.0.1.1": "baz",
".1.0.0.1.2": 234,
".1.0.0.1.3": []byte("byte slice"),
".1.0.0.2.1.5.0.9.9": 11,
".1.0.0.2.1.5.1.9.9": 22,
".1.0.0.3.1.1.10": "instance",
".1.0.0.3.1.1.11": "instance2",
".1.0.0.3.1.1.12": "instance3",
".1.0.0.3.1.2.10": 10,
".1.0.0.3.1.2.11": 20,
".1.0.0.3.1.2.12": 20,
".1.0.0.3.1.3.10": 1,
".1.0.0.3.1.3.11": 2,
".1.0.0.3.1.3.12": 3,
".1.3.6.1.2.1.3.1.1.1.0": "foo",
".1.3.6.1.2.1.3.1.1.1.1": []byte("bar"),
".1.3.6.1.2.1.3.1.1.1.2": []byte(""),
".1.3.6.1.2.1.3.1.1.102": "bad",
".1.3.6.1.2.1.3.1.1.2.0": 1,
".1.3.6.1.2.1.3.1.1.2.1": 2,
".1.3.6.1.2.1.3.1.1.2.2": 0,
".1.3.6.1.2.1.3.1.1.3.0": "1.3.6.1.2.1.3.1.1.3",
".1.3.6.1.2.1.3.1.1.5.0": 123456,
},
}

View file

@ -0,0 +1,88 @@
package templating
import (
"sort"
"strings"
)
const (
// DefaultSeparator is the default separation character to use when separating template parts.
DefaultSeparator = "."
)
// Engine uses a Matcher to retrieve the appropriate template and applies the template
// to the input string
type Engine struct {
joiner string
matcher *matcher
}
// Apply extracts the template fields from the given line and returns the measurement
// name, tags and field name
//
//nolint:revive //function-result-limit conditionally 4 return results allowed
func (e *Engine) Apply(line string) (measurementName string, tags map[string]string, field string, err error) {
return e.matcher.match(line).Apply(line, e.joiner)
}
// NewEngine creates a new templating engine
func NewEngine(joiner string, defaultTemplate *Template, templates []string) (*Engine, error) {
engine := Engine{
joiner: joiner,
matcher: newMatcher(defaultTemplate),
}
templateSpecs := parseTemplateSpecs(templates)
for _, templateSpec := range templateSpecs {
if err := engine.matcher.addSpec(templateSpec); err != nil {
return nil, err
}
}
return &engine, nil
}
func parseTemplateSpecs(templates []string) templateSpecs {
tmplts := templateSpecs{}
for _, pattern := range templates {
tmplt := templateSpec{
separator: DefaultSeparator,
}
// Format is [separator] [filter] <template> [tag1=value1,tag2=value2]
parts := strings.Fields(pattern)
partsLength := len(parts)
if partsLength < 1 {
// ignore
continue
}
if partsLength == 1 {
tmplt.template = pattern
} else if partsLength == 4 {
tmplt.separator = parts[0]
tmplt.filter = parts[1]
tmplt.template = parts[2]
tmplt.tagstring = parts[3]
} else {
hasTagstring := strings.Contains(parts[partsLength-1], "=")
if hasTagstring {
tmplt.tagstring = parts[partsLength-1]
tmplt.template = parts[partsLength-2]
if partsLength == 3 {
tmplt.filter = parts[0]
}
} else {
tmplt.template = parts[partsLength-1]
if partsLength == 2 {
tmplt.filter = parts[0]
} else { // length == 3
tmplt.separator = parts[0]
tmplt.filter = parts[1]
}
}
}
tmplts = append(tmplts, tmplt)
}
sort.Sort(tmplts)
return tmplts
}

View file

@ -0,0 +1,77 @@
package templating
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestEngineAlternateSeparator(t *testing.T) {
defaultTemplate, err := NewDefaultTemplateWithPattern("measurement*")
require.NoError(t, err)
engine, err := NewEngine("_", defaultTemplate, []string{
"/ /*/*/* /measurement/origin/measurement*",
})
require.NoError(t, err)
name, tags, field, err := engine.Apply("/telegraf/host01/cpu")
require.NoError(t, err)
require.Equal(t, "telegraf_cpu", name)
require.Equal(t, map[string]string{
"origin": "host01",
}, tags)
require.Empty(t, field)
}
func TestEngineWithWildcardTemplate(t *testing.T) {
var (
defaultTmpl, err = NewDefaultTemplateWithPattern("measurement*")
templates = []string{
"taskmanagerTask.alarm-detector.Assign.alarmDefinitionId metricsType.process.nodeId.x.alarmDefinitionId.measurement.field rule=1",
"taskmanagerTask.*.*.*.* metricsType.process.nodeId.measurement rule=2",
}
)
require.NoError(t, err)
engine, err := NewEngine(".", defaultTmpl, templates)
require.NoError(t, err)
for _, testCase := range []struct {
line string
measurement string
field string
tags map[string]string
}{
{
line: "taskmanagerTask.alarm-detector.Assign.alarmDefinitionId.timeout_errors.duration.p75",
measurement: "duration",
field: "p75",
tags: map[string]string{
"metricsType": "taskmanagerTask",
"process": "alarm-detector",
"nodeId": "Assign",
"x": "alarmDefinitionId",
"alarmDefinitionId": "timeout_errors",
"rule": "1",
},
},
{
line: "taskmanagerTask.alarm-detector.Assign.numRecordsInPerSecond.m5_rate",
measurement: "numRecordsInPerSecond",
tags: map[string]string{
"metricsType": "taskmanagerTask",
"process": "alarm-detector",
"nodeId": "Assign",
"rule": "2",
},
},
} {
t.Run(testCase.line, func(t *testing.T) {
measurement, tags, field, err := engine.Apply(testCase.line)
require.NoError(t, err)
require.Equal(t, testCase.measurement, measurement)
require.Equal(t, testCase.field, field)
require.Equal(t, testCase.tags, tags)
})
}
}

View file

@ -0,0 +1,58 @@
package templating
import (
"strings"
)
// matcher determines which template should be applied to a given metric
// based on a filter tree.
type matcher struct {
root *node
defaultTemplate *Template
}
// newMatcher creates a new matcher.
func newMatcher(defaultTemplate *Template) *matcher {
return &matcher{
root: &node{},
defaultTemplate: defaultTemplate,
}
}
func (m *matcher) addSpec(tmplt templateSpec) error {
// Parse out the default tags specific to this template
tags := make(map[string]string)
if tmplt.tagstring != "" {
for _, kv := range strings.Split(tmplt.tagstring, ",") {
parts := strings.Split(kv, "=")
tags[parts[0]] = parts[1]
}
}
tmpl, err := NewTemplate(tmplt.separator, tmplt.template, tags)
if err != nil {
return err
}
m.add(tmplt.filter, tmpl)
return nil
}
// add inserts the template in the filter tree based the given filter
func (m *matcher) add(filter string, template *Template) {
if filter == "" {
m.defaultTemplate = template
m.root.separator = template.separator
return
}
m.root.insert(filter, template)
}
// match returns the template that matches the given measurement line.
// If no template matches, the default template is returned.
func (m *matcher) match(line string) *Template {
tmpl := m.root.search(line)
if tmpl != nil {
return tmpl
}
return m.defaultTemplate
}

136
internal/templating/node.go Normal file
View file

@ -0,0 +1,136 @@
package templating
import (
"sort"
"strings"
)
// node is an item in a sorted k-ary tree of filter parts. Each child is sorted by its part value.
// The special value of "*", is always sorted last.
type node struct {
separator string
value string
children nodes
template *Template
}
// insert inserts the given string template into the tree. The filter string is separated
// on the template separator and each part is used as the path in the tree.
func (n *node) insert(filter string, template *Template) {
n.separator = template.separator
n.recursiveInsert(strings.Split(filter, n.separator), template)
}
// recursiveInsert does the actual recursive insertion
func (n *node) recursiveInsert(values []string, template *Template) {
// Add the end, set the template
if len(values) == 0 {
n.template = template
return
}
// See if the current element already exists in the tree. If so, insert the
// into that sub-tree
for _, v := range n.children {
if v.value == values[0] {
v.recursiveInsert(values[1:], template)
return
}
}
// New element, add it to the tree and sort the children
newNode := &node{value: values[0]}
n.children = append(n.children, newNode)
sort.Sort(&n.children)
// Now insert the rest of the tree into the new element
newNode.recursiveInsert(values[1:], template)
}
// search searches for a template matching the input string
func (n *node) search(line string) *Template {
separator := n.separator
return n.recursiveSearch(strings.Split(line, separator))
}
// recursiveSearch performs the actual recursive search
func (n *node) recursiveSearch(lineParts []string) *Template {
// nothing to search
if len(lineParts) == 0 || len(n.children) == 0 {
return n.template
}
var (
hasWildcard bool
length = len(n.children)
)
// exclude last child from search if it is a wildcard. sort.Search expects
// a lexicographically sorted set of children and we have artificially sorted
// wildcards to the end of the child set
// wildcards will be searched separately if no exact match is found
if hasWildcard = n.children[length-1].value == "*"; hasWildcard {
length--
}
i := sort.Search(length, func(i int) bool {
return n.children[i].value >= lineParts[0]
})
// given an exact match is found within children set
if i < length && n.children[i].value == lineParts[0] {
// descend into the matching node
if tmpl := n.children[i].recursiveSearch(lineParts[1:]); tmpl != nil {
// given a template is found return it
return tmpl
}
}
// given no template is found and the last child is a wildcard
if hasWildcard {
// also search the wildcard child node
return n.children[length].recursiveSearch(lineParts[1:])
}
// fallback to returning template at this node
return n.template
}
// nodes is simply an array of nodes implementing the sorting interface.
type nodes []*node
// Less returns a boolean indicating whether the filter at position j
// is less than the filter at position k. Filters are order by string
// comparison of each component parts. A wildcard value "*" is never
// less than a non-wildcard value.
//
// For example, the filters:
//
// "*.*"
// "servers.*"
// "servers.localhost"
// "*.localhost"
//
// Would be sorted as:
//
// "servers.localhost"
// "servers.*"
// "*.localhost"
// "*.*"
func (n *nodes) Less(j, k int) bool {
if (*n)[j].value == "*" && (*n)[k].value != "*" {
return false
}
if (*n)[j].value != "*" && (*n)[k].value == "*" {
return true
}
return (*n)[j].value < (*n)[k].value
}
// Swap swaps two elements of the array
func (n *nodes) Swap(i, j int) { (*n)[i], (*n)[j] = (*n)[j], (*n)[i] }
// Len returns the length of the array
func (n *nodes) Len() int { return len(*n) }

View file

@ -0,0 +1,142 @@
package templating
import (
"fmt"
"strings"
)
// Template represents a pattern and tags to map a metric string to an influxdb Point
type Template struct {
separator string
parts []string
defaultTags map[string]string
greedyField bool
greedyMeasurement bool
}
// Apply extracts the template fields from the given line and returns the measurement
// name, tags and field name
//
//nolint:revive //function-result-limit conditionally 4 return results allowed
func (t *Template) Apply(line string, joiner string) (measurementName string, tags map[string]string, field string, err error) {
allFields := strings.Split(line, t.separator)
var (
measurements []string
tagsMap = make(map[string][]string)
fields []string
)
// Set any default tags
for k, v := range t.defaultTags {
tagsMap[k] = append(tagsMap[k], v)
}
// See if an invalid combination has been specified in the template:
for _, tag := range t.parts {
if tag == "measurement*" {
t.greedyMeasurement = true
} else if tag == "field*" {
t.greedyField = true
}
}
if t.greedyField && t.greedyMeasurement {
return "", nil, "",
fmt.Errorf("either 'field*' or 'measurement*' can be used in each "+
"template (but not both together): %q",
strings.Join(t.parts, joiner))
}
for i, tag := range t.parts {
if i >= len(allFields) {
continue
}
if tag == "" {
continue
}
switch tag {
case "measurement":
measurements = append(measurements, allFields[i])
case "field":
fields = append(fields, allFields[i])
case "field*":
fields = append(fields, allFields[i:]...)
case "measurement*":
measurements = append(measurements, allFields[i:]...)
default:
tagsMap[tag] = append(tagsMap[tag], allFields[i])
}
}
// Convert to map of strings.
tags = make(map[string]string)
for k, values := range tagsMap {
tags[k] = strings.Join(values, joiner)
}
return strings.Join(measurements, joiner), tags, strings.Join(fields, joiner), nil
}
func NewDefaultTemplateWithPattern(pattern string) (*Template, error) {
return NewTemplate(DefaultSeparator, pattern, nil)
}
// NewTemplate returns a new template ensuring it has a measurement specified.
func NewTemplate(separator, pattern string, defaultTags map[string]string) (*Template, error) {
parts := strings.Split(pattern, separator)
hasMeasurement := false
template := &Template{
separator: separator,
parts: parts,
defaultTags: defaultTags,
}
for _, part := range parts {
if strings.HasPrefix(part, "measurement") {
hasMeasurement = true
}
if part == "measurement*" {
template.greedyMeasurement = true
} else if part == "field*" {
template.greedyField = true
}
}
if !hasMeasurement {
return nil, fmt.Errorf("no measurement specified for template. %q", pattern)
}
return template, nil
}
// templateSpec is a template string split in its constituent parts
type templateSpec struct {
separator string
filter string
template string
tagstring string
}
// templateSpecs is simply an array of template specs implementing the sorting interface
type templateSpecs []templateSpec
// Less reports whether the element with
// index j should sort before the element with index k.
func (e templateSpecs) Less(j, k int) bool {
jlen := len(e[j].filter)
klen := len(e[k].filter)
if jlen == 0 && klen != 0 {
return true
}
if klen == 0 && jlen != 0 {
return false
}
return strings.Count(e[j].template, e[j].separator) <
strings.Count(e[k].template, e[k].separator)
}
// Swap swaps the elements with indexes i and j.
func (e templateSpecs) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
// Len is the number of elements in the collection.
func (e templateSpecs) Len() int { return len(e) }

View file

@ -0,0 +1,14 @@
package templating
import "testing"
func BenchmarkTemplateLess(b *testing.B) {
a := templateSpec{
template: "aa|bb|cc|dd|ee|ff",
separator: "|",
}
specs := templateSpecs{a, a}
for i := 0; i < b.N; i++ {
specs.Less(0, 1)
}
}

View file

@ -0,0 +1,763 @@
package internal
import (
"fmt"
"math"
"strconv"
"strings"
)
var ErrOutOfRange = strconv.ErrRange
func ToFloat64(value interface{}) (float64, error) {
switch v := value.(type) {
case string:
return strconv.ParseFloat(v, 64)
case []byte:
return strconv.ParseFloat(string(v), 64)
case fmt.Stringer:
return strconv.ParseFloat(v.String(), 64)
case bool:
if v {
return 1, nil
}
return 0, nil
case int:
return float64(v), nil
case int8:
return float64(v), nil
case int16:
return float64(v), nil
case int32:
return float64(v), nil
case int64:
return float64(v), nil
case uint:
return float64(v), nil
case uint8:
return float64(v), nil
case uint16:
return float64(v), nil
case uint32:
return float64(v), nil
case uint64:
return float64(v), nil
case float32:
return float64(v), nil
case float64:
return v, nil
case nil:
return 0, nil
}
return 0, fmt.Errorf("type \"%T\" unsupported", value)
}
func ToFloat32(value interface{}) (float32, error) {
switch v := value.(type) {
case string:
x, err := strconv.ParseFloat(v, 32)
return float32(x), err
case []byte:
x, err := strconv.ParseFloat(string(v), 32)
return float32(x), err
case fmt.Stringer:
x, err := strconv.ParseFloat(v.String(), 32)
return float32(x), err
case bool:
if v {
return 1, nil
}
return 0, nil
case int:
return float32(v), nil
case int8:
return float32(v), nil
case int16:
return float32(v), nil
case int32:
return float32(v), nil
case int64:
return float32(v), nil
case uint:
return float32(v), nil
case uint8:
return float32(v), nil
case uint16:
return float32(v), nil
case uint32:
return float32(v), nil
case uint64:
return float32(v), nil
case float32:
return v, nil
case float64:
if v < -math.MaxFloat32 || v > math.MaxFloat32 {
return float32(v), ErrOutOfRange
}
return float32(v), nil
case nil:
return 0, nil
}
return 0, fmt.Errorf("type \"%T\" unsupported", value)
}
func ToUint64(value interface{}) (uint64, error) {
switch v := value.(type) {
case string:
if strings.HasPrefix(v, "0x") {
return strconv.ParseUint(strings.TrimPrefix(v, "0x"), 16, 64)
}
return strconv.ParseUint(v, 10, 64)
case []byte:
return strconv.ParseUint(string(v), 10, 64)
case fmt.Stringer:
return strconv.ParseUint(v.String(), 10, 64)
case bool:
if v {
return 1, nil
}
return 0, nil
case int:
if v < 0 {
return uint64(v), ErrOutOfRange
}
return uint64(v), nil
case int8:
if v < 0 {
return uint64(v), ErrOutOfRange
}
return uint64(v), nil
case int16:
if v < 0 {
return uint64(v), ErrOutOfRange
}
return uint64(v), nil
case int32:
if v < 0 {
return uint64(v), ErrOutOfRange
}
return uint64(v), nil
case int64:
if v < 0 {
return uint64(v), ErrOutOfRange
}
return uint64(v), nil
case uint:
return uint64(v), nil
case uint8:
return uint64(v), nil
case uint16:
return uint64(v), nil
case uint32:
return uint64(v), nil
case uint64:
return v, nil
case float32:
if v < 0 || v > math.MaxUint64 {
return uint64(v), ErrOutOfRange
}
return uint64(v), nil
case float64:
if v < 0 || v > math.MaxUint64 {
return uint64(v), ErrOutOfRange
}
return uint64(v), nil
case nil:
return 0, nil
}
return 0, fmt.Errorf("type \"%T\" unsupported", value)
}
func ToUint32(value interface{}) (uint32, error) {
switch v := value.(type) {
case string:
if strings.HasPrefix(v, "0x") {
x, err := strconv.ParseUint(strings.TrimPrefix(v, "0x"), 16, 32)
return uint32(x), err
}
x, err := strconv.ParseUint(v, 10, 32)
return uint32(x), err
case []byte:
x, err := strconv.ParseUint(string(v), 10, 32)
return uint32(x), err
case fmt.Stringer:
x, err := strconv.ParseUint(v.String(), 10, 32)
return uint32(x), err
case bool:
if v {
return 1, nil
}
return 0, nil
case int:
if v < 0 || uint64(v) > math.MaxUint32 {
return uint32(v), ErrOutOfRange
}
return uint32(v), nil
case int8:
if v < 0 {
return uint32(v), ErrOutOfRange
}
return uint32(v), nil
case int16:
if v < 0 {
return uint32(v), ErrOutOfRange
}
return uint32(v), nil
case int32:
if v < 0 {
return uint32(v), ErrOutOfRange
}
return uint32(v), nil
case int64:
if v < 0 || v > math.MaxUint32 {
return uint32(v), ErrOutOfRange
}
return uint32(v), nil
case uint:
return uint32(v), nil
case uint8:
return uint32(v), nil
case uint16:
return uint32(v), nil
case uint32:
return v, nil
case uint64:
if v > math.MaxUint32 {
return uint32(v), ErrOutOfRange
}
return uint32(v), nil
case float32:
if v < 0 || v > math.MaxUint32 {
return uint32(v), ErrOutOfRange
}
return uint32(v), nil
case float64:
if v < 0 || v > math.MaxUint32 {
return uint32(v), ErrOutOfRange
}
return uint32(v), nil
case nil:
return 0, nil
}
return 0, fmt.Errorf("type \"%T\" unsupported", value)
}
func ToUint16(value interface{}) (uint16, error) {
switch v := value.(type) {
case string:
if strings.HasPrefix(v, "0x") {
x, err := strconv.ParseUint(strings.TrimPrefix(v, "0x"), 16, 16)
return uint16(x), err
}
x, err := strconv.ParseUint(v, 10, 32)
return uint16(x), err
case []byte:
x, err := strconv.ParseUint(string(v), 10, 32)
return uint16(x), err
case fmt.Stringer:
x, err := strconv.ParseUint(v.String(), 10, 32)
return uint16(x), err
case bool:
if v {
return 1, nil
}
return 0, nil
case int:
if v < 0 || v > math.MaxUint16 {
return uint16(v), ErrOutOfRange
}
return uint16(v), nil
case int8:
if v < 0 {
return uint16(v), ErrOutOfRange
}
return uint16(v), nil
case int16:
if v < 0 {
return uint16(v), ErrOutOfRange
}
return uint16(v), nil
case int32:
if v < 0 || v > math.MaxUint16 {
return uint16(v), ErrOutOfRange
}
return uint16(v), nil
case int64:
if v < 0 || v > math.MaxUint16 {
return uint16(v), ErrOutOfRange
}
return uint16(v), nil
case uint:
return uint16(v), nil
case uint8:
return uint16(v), nil
case uint16:
return v, nil
case uint32:
if v > math.MaxUint16 {
return uint16(v), ErrOutOfRange
}
return uint16(v), nil
case uint64:
if v > math.MaxUint16 {
return uint16(v), ErrOutOfRange
}
return uint16(v), nil
case float32:
if v < 0 || v > math.MaxUint16 {
return uint16(v), ErrOutOfRange
}
return uint16(v), nil
case float64:
if v < 0 || v > math.MaxUint16 {
return uint16(v), ErrOutOfRange
}
return uint16(v), nil
case nil:
return 0, nil
}
return 0, fmt.Errorf("type \"%T\" unsupported", value)
}
func ToUint8(value interface{}) (uint8, error) {
switch v := value.(type) {
case string:
if strings.HasPrefix(v, "0x") {
x, err := strconv.ParseUint(strings.TrimPrefix(v, "0x"), 16, 8)
return uint8(x), err
}
x, err := strconv.ParseUint(v, 10, 32)
return uint8(x), err
case []byte:
x, err := strconv.ParseUint(string(v), 10, 32)
return uint8(x), err
case fmt.Stringer:
x, err := strconv.ParseUint(v.String(), 10, 32)
return uint8(x), err
case bool:
if v {
return 1, nil
}
return 0, nil
case int:
if v < 0 || v > math.MaxUint8 {
return uint8(v), ErrOutOfRange
}
return uint8(v), nil
case int8:
if v < 0 {
return uint8(v), ErrOutOfRange
}
return uint8(v), nil
case int16:
if v < 0 || v > math.MaxUint8 {
return uint8(v), ErrOutOfRange
}
return uint8(v), nil
case int32:
if v < 0 || v > math.MaxUint8 {
return uint8(v), ErrOutOfRange
}
return uint8(v), nil
case int64:
if v < 0 || v > math.MaxUint8 {
return uint8(v), ErrOutOfRange
}
return uint8(v), nil
case uint:
return uint8(v), nil
case uint8:
return v, nil
case uint16:
if v > math.MaxUint8 {
return uint8(v), ErrOutOfRange
}
return uint8(v), nil
case uint32:
if v > math.MaxUint8 {
return uint8(v), ErrOutOfRange
}
return uint8(v), nil
case uint64:
if v > math.MaxUint8 {
return uint8(v), ErrOutOfRange
}
return uint8(v), nil
case float32:
if v < 0 || v > math.MaxUint8 {
return uint8(v), ErrOutOfRange
}
return uint8(v), nil
case float64:
if v < 0 || v > math.MaxUint8 {
return uint8(v), ErrOutOfRange
}
return uint8(v), nil
case nil:
return 0, nil
}
return 0, fmt.Errorf("type \"%T\" unsupported", value)
}
func ToInt64(value interface{}) (int64, error) {
switch v := value.(type) {
case string:
if strings.HasPrefix(v, "0x") {
return strconv.ParseInt(strings.TrimPrefix(v, "0x"), 16, 64)
}
return strconv.ParseInt(v, 10, 64)
case []byte:
return strconv.ParseInt(string(v), 10, 64)
case fmt.Stringer:
return strconv.ParseInt(v.String(), 10, 64)
case bool:
if v {
return 1, nil
}
return 0, nil
case int:
return int64(v), nil
case int8:
return int64(v), nil
case int16:
return int64(v), nil
case int32:
return int64(v), nil
case int64:
return v, nil
case uint:
if uint64(v) > math.MaxInt64 {
return int64(v), ErrOutOfRange
}
return int64(v), nil
case uint8:
return int64(v), nil
case uint16:
return int64(v), nil
case uint32:
return int64(v), nil
case uint64:
if v > math.MaxInt64 {
return int64(v), ErrOutOfRange
}
return int64(v), nil
case float32:
if v < math.MinInt64 || v > math.MaxInt64 {
return int64(v), ErrOutOfRange
}
return int64(v), nil
case float64:
if v < math.MinInt64 || v > math.MaxInt64 {
return int64(v), ErrOutOfRange
}
return int64(v), nil
case nil:
return 0, nil
}
return 0, fmt.Errorf("type \"%T\" unsupported", value)
}
func ToInt32(value interface{}) (int32, error) {
switch v := value.(type) {
case string:
if strings.HasPrefix(v, "0x") {
x, err := strconv.ParseInt(strings.TrimPrefix(v, "0x"), 16, 32)
return int32(x), err
}
x, err := strconv.ParseInt(v, 10, 32)
return int32(x), err
case []byte:
x, err := strconv.ParseInt(string(v), 10, 32)
return int32(x), err
case fmt.Stringer:
x, err := strconv.ParseInt(v.String(), 10, 32)
return int32(x), err
case bool:
if v {
return 1, nil
}
return 0, nil
case int:
if int64(v) < math.MinInt32 || int64(v) > math.MaxInt32 {
return int32(v), ErrOutOfRange
}
return int32(v), nil
case int8:
return int32(v), nil
case int16:
return int32(v), nil
case int32:
return v, nil
case int64:
if v < math.MinInt32 || v > math.MaxInt32 {
return int32(v), ErrOutOfRange
}
return int32(v), nil
case uint:
if v > math.MaxInt32 {
return int32(v), ErrOutOfRange
}
return int32(v), nil
case uint8:
return int32(v), nil
case uint16:
return int32(v), nil
case uint32:
if v > math.MaxInt32 {
return int32(v), ErrOutOfRange
}
return int32(v), nil
case uint64:
if v > math.MaxInt32 {
return int32(v), ErrOutOfRange
}
return int32(v), nil
case float32:
if v < math.MinInt32 || v > math.MaxInt32 {
return int32(v), ErrOutOfRange
}
return int32(v), nil
case float64:
if v < math.MinInt32 || v > math.MaxInt32 {
return int32(v), ErrOutOfRange
}
return int32(v), nil
case nil:
return 0, nil
}
return 0, fmt.Errorf("type \"%T\" unsupported", value)
}
func ToInt16(value interface{}) (int16, error) {
switch v := value.(type) {
case string:
if strings.HasPrefix(v, "0x") {
x, err := strconv.ParseInt(strings.TrimPrefix(v, "0x"), 16, 16)
return int16(x), err
}
x, err := strconv.ParseInt(v, 10, 32)
return int16(x), err
case []byte:
x, err := strconv.ParseInt(string(v), 10, 32)
return int16(x), err
case fmt.Stringer:
x, err := strconv.ParseInt(v.String(), 10, 32)
return int16(x), err
case bool:
if v {
return 1, nil
}
return 0, nil
case int:
return int16(v), nil
case int8:
return int16(v), nil
case int16:
return v, nil
case int32:
if v < math.MinInt16 || v > math.MaxInt16 {
return int16(v), ErrOutOfRange
}
return int16(v), nil
case int64:
if v < math.MinInt16 || v > math.MaxInt16 {
return int16(v), ErrOutOfRange
}
return int16(v), nil
case uint:
if v > math.MaxInt16 {
return int16(v), ErrOutOfRange
}
return int16(v), nil
case uint8:
return int16(v), nil
case uint16:
if v > math.MaxInt16 {
return int16(v), ErrOutOfRange
}
return int16(v), nil
case uint32:
if v > math.MaxInt16 {
return int16(v), ErrOutOfRange
}
return int16(v), nil
case uint64:
if v > math.MaxInt16 {
return int16(v), ErrOutOfRange
}
return int16(v), nil
case float32:
if v < math.MinInt16 || v > math.MaxInt16 {
return int16(v), ErrOutOfRange
}
return int16(v), nil
case float64:
if v < math.MinInt16 || v > math.MaxInt16 {
return int16(v), ErrOutOfRange
}
return int16(v), nil
case nil:
return 0, nil
}
return 0, fmt.Errorf("type \"%T\" unsupported", value)
}
func ToInt8(value interface{}) (int8, error) {
switch v := value.(type) {
case string:
if strings.HasPrefix(v, "0x") {
x, err := strconv.ParseInt(strings.TrimPrefix(v, "0x"), 16, 8)
return int8(x), err
}
x, err := strconv.ParseInt(v, 10, 32)
return int8(x), err
case []byte:
x, err := strconv.ParseInt(string(v), 10, 32)
return int8(x), err
case fmt.Stringer:
x, err := strconv.ParseInt(v.String(), 10, 32)
return int8(x), err
case bool:
if v {
return 1, nil
}
return 0, nil
case int:
return int8(v), nil
case int8:
return v, nil
case int16:
if v < math.MinInt8 || v > math.MaxInt8 {
return int8(v), ErrOutOfRange
}
return int8(v), nil
case int32:
if v < math.MinInt8 || v > math.MaxInt8 {
return int8(v), ErrOutOfRange
}
return int8(v), nil
case int64:
if v < math.MinInt8 || v > math.MaxInt8 {
return int8(v), ErrOutOfRange
}
return int8(v), nil
case uint:
if v > math.MaxInt8 {
return int8(v), ErrOutOfRange
}
return int8(v), nil
case uint8:
if v > math.MaxInt8 {
return int8(v), ErrOutOfRange
}
return int8(v), nil
case uint16:
if v > math.MaxInt8 {
return int8(v), ErrOutOfRange
}
return int8(v), nil
case uint32:
if v > math.MaxInt8 {
return int8(v), ErrOutOfRange
}
return int8(v), nil
case uint64:
if v > math.MaxInt8 {
return int8(v), ErrOutOfRange
}
return int8(v), nil
case float32:
if v < math.MinInt8 || v > math.MaxInt8 {
return int8(v), ErrOutOfRange
}
return int8(v), nil
case float64:
if v < math.MinInt8 || v > math.MaxInt8 {
return int8(v), ErrOutOfRange
}
return int8(v), nil
case nil:
return 0, nil
}
return 0, fmt.Errorf("type \"%T\" unsupported", value)
}
func ToString(value interface{}) (string, error) {
switch v := value.(type) {
case string:
return v, nil
case []byte:
return string(v), nil
case int:
return strconv.FormatInt(int64(v), 10), nil
case int8:
return strconv.FormatInt(int64(v), 10), nil
case int16:
return strconv.FormatInt(int64(v), 10), nil
case int32:
return strconv.FormatInt(int64(v), 10), nil
case int64:
return strconv.FormatInt(v, 10), nil
case uint:
return strconv.FormatUint(uint64(v), 10), nil
case uint8:
return strconv.FormatUint(uint64(v), 10), nil
case uint16:
return strconv.FormatUint(uint64(v), 10), nil
case uint32:
return strconv.FormatUint(uint64(v), 10), nil
case uint64:
return strconv.FormatUint(v, 10), nil
case float32:
return strconv.FormatFloat(float64(v), 'f', -1, 32), nil
case float64:
return strconv.FormatFloat(v, 'f', -1, 64), nil
case bool:
return strconv.FormatBool(v), nil
case fmt.Stringer:
return v.String(), nil
case nil:
return "", nil
}
return "", fmt.Errorf("type \"%T\" unsupported", value)
}
func ToBool(value interface{}) (bool, error) {
switch v := value.(type) {
case string:
return strconv.ParseBool(v)
case []byte:
return strconv.ParseBool(string(v))
case fmt.Stringer:
return strconv.ParseBool(v.String())
case int:
return v > 0, nil
case int8:
return v > 0, nil
case int16:
return v > 0, nil
case int32:
return v > 0, nil
case int64:
return v > 0, nil
case uint:
return v > 0, nil
case uint8:
return v > 0, nil
case uint16:
return v > 0, nil
case uint32:
return v > 0, nil
case uint64:
return v > 0, nil
case float32:
return v > 0, nil
case float64:
return v > 0, nil
case bool:
return v, nil
case nil:
return false, nil
}
return false, fmt.Errorf("type \"%T\" unsupported", value)
}