Adding upstream version 1.34.4.
Signed-off-by: Daniel Baumann <daniel@debian.org>
This commit is contained in:
parent
e393c3af3f
commit
4978089aab
4963 changed files with 677545 additions and 0 deletions
|
@ -0,0 +1,81 @@
|
|||
/*
|
||||
This is a development testing cli tool meant to stress the zipkin telegraf plugin.
|
||||
It writes a specified number of zipkin spans to the plugin endpoint, with other
|
||||
parameters which dictate batch size and flush timeout.
|
||||
|
||||
Usage as follows:
|
||||
|
||||
`./stress_test_write -batch_size=<batch_size> -max_backlog=<max_span_buffer_backlog> -batch_interval=<batch_interval_in_seconds> \
|
||||
-span_count=<number_of_spans_to_write> -zipkin_host=<zipkin_service_hostname>`
|
||||
|
||||
Or with a timer:
|
||||
|
||||
`time ./stress_test_write -batch_size=<batch_size> -max_backlog=<max_span_buffer_backlog> -batch_interval=<batch_interval_in_seconds> \
|
||||
-span_count=<number_of_spans_to_write> -zipkin_host=<zipkin_service_hostname>`
|
||||
|
||||
However, the flag defaults work just fine for a good write stress test (and are what
|
||||
this tool has mainly been tested with), so there shouldn't be much need to
|
||||
manually tweak the parameters.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
otlog "github.com/opentracing/opentracing-go/log"
|
||||
zipkinot "github.com/openzipkin-contrib/zipkin-go-opentracing"
|
||||
"github.com/openzipkin/zipkin-go"
|
||||
zipkinhttp "github.com/openzipkin/zipkin-go/reporter/http"
|
||||
)
|
||||
|
||||
// Command-line configuration for the stress test; populated in init via
// the flag package and read by main after flag.Parse().
var (
	batchSize         int    // maximum spans per reporter batch (-batch_size)
	maxBackLog        int    // reporter span buffer backlog limit (-max_backlog)
	batchTimeInterval int    // reporter flush interval in seconds (-batch_interval)
	spanCount         int    // total number of spans to write (-span_count)
	zipkinServerHost  string // hostname of the zipkin server (-zipkin_host)
)
|
||||
|
||||
func init() {
|
||||
flag.IntVar(&batchSize, "batch_size", 10000, "")
|
||||
flag.IntVar(&maxBackLog, "max_backlog", 100000, "")
|
||||
flag.IntVar(&batchTimeInterval, "batch_interval", 1, "")
|
||||
flag.IntVar(&spanCount, "span_count", 100000, "")
|
||||
flag.StringVar(&zipkinServerHost, "zipkin_host", "localhost", "")
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
var hostname = fmt.Sprintf("http://%s:9411/api/v1/spans", zipkinServerHost)
|
||||
reporter := zipkinhttp.NewReporter(
|
||||
hostname,
|
||||
zipkinhttp.BatchSize(batchSize),
|
||||
zipkinhttp.MaxBacklog(maxBackLog),
|
||||
zipkinhttp.BatchInterval(time.Duration(batchTimeInterval)*time.Second),
|
||||
)
|
||||
defer reporter.Close()
|
||||
|
||||
endpoint, err := zipkin.NewEndpoint("Trivial", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
log.Panicf("Error: %v\n", err)
|
||||
}
|
||||
|
||||
nativeTracer, err := zipkin.NewTracer(reporter, zipkin.WithLocalEndpoint(endpoint))
|
||||
if err != nil {
|
||||
log.Panicf("Error: %v\n", err)
|
||||
}
|
||||
|
||||
tracer := zipkinot.Wrap(nativeTracer)
|
||||
|
||||
log.Printf("Writing %d spans to zipkin server at %s\n", spanCount, hostname)
|
||||
for i := 0; i < spanCount; i++ {
|
||||
parent := tracer.StartSpan("Parent")
|
||||
parent.LogFields(otlog.Message(fmt.Sprintf("Trace%d", i)))
|
||||
parent.Finish()
|
||||
}
|
||||
log.Println("Done. Flushing remaining spans...")
|
||||
}
|
148
plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go
Normal file
148
plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go
Normal file
|
@ -0,0 +1,148 @@
|
|||
/*
|
||||
A small cli utility meant to convert json to zipkin thrift binary format, and
|
||||
vice versa.
|
||||
|
||||
To convert from json to thrift,
|
||||
the json is unmarshalled, converted to zipkincore.Span structures, and
|
||||
marshalled into thrift binary protocol. The json must be in an array format (even if it only has one object),
|
||||
because the tool automatically tries to unmarshal the json into an array of structs.
|
||||
|
||||
To convert from thrift to json,
|
||||
the opposite process must happen. The thrift binary data must be read into an array of
|
||||
zipkin span structures, and those spans must be marshalled into json.
|
||||
|
||||
Usage:
|
||||
|
||||
./thrift_serialize -input <input-file> -output <output-file> -input-type=<json|thrift>
|
||||
|
||||
If `input-type` is set to `thrift` (the default), the tool will interpret the input file as
thrift, and write it as json to the output file.
If it is set to `json`, the input file will be interpreted as json, and the output will be encoded as thrift.
|
||||
*/
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"github.com/apache/thrift/lib/go/thrift"
|
||||
|
||||
"github.com/influxdata/telegraf/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore"
|
||||
)
|
||||
|
||||
// Command-line configuration; populated in init via the flag package
// and read by main after flag.Parse().
var (
	filename    string // path of the input file to read (-input)
	outFileName string // path of the output file to write (-output)
	inputType   string // format of the input file: "json" or "thrift" (-input-type)
)
|
||||
|
||||
// usage is the shared help text registered for every flag.
const usage = `./json_serialize -input <input> -output output -input-type<json|thrift>`
|
||||
|
||||
func init() {
|
||||
flag.StringVar(&filename, "input", "", usage)
|
||||
flag.StringVar(&outFileName, "output", "", usage)
|
||||
flag.StringVar(&inputType, "input-type", "thrift", usage)
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
contents, err := os.ReadFile(filename)
|
||||
if err != nil {
|
||||
log.Fatalf("Error reading file: %v\n", err)
|
||||
}
|
||||
|
||||
switch inputType {
|
||||
case "json":
|
||||
raw, err := jsonToZipkinThrift(contents)
|
||||
if err != nil {
|
||||
log.Fatalf("%v\n", err)
|
||||
}
|
||||
if err := os.WriteFile(outFileName, raw, 0640); err != nil {
|
||||
log.Fatalf("%v", err)
|
||||
}
|
||||
case "thrift":
|
||||
raw, err := thriftToJSONSpans(contents)
|
||||
if err != nil {
|
||||
log.Fatalf("%v\n", err)
|
||||
}
|
||||
if err := os.WriteFile(outFileName, raw, 0640); err != nil {
|
||||
log.Fatalf("%v", err)
|
||||
}
|
||||
default:
|
||||
log.Fatalf("Unsupported input type")
|
||||
}
|
||||
}
|
||||
|
||||
func jsonToZipkinThrift(jsonRaw []byte) ([]byte, error) {
|
||||
if len(jsonRaw) == 0 {
|
||||
return nil, errors.New("no data")
|
||||
}
|
||||
|
||||
if string(jsonRaw)[0] != '[' {
|
||||
return nil, errors.New("cannot unmarshal non array type")
|
||||
}
|
||||
|
||||
var spans []*zipkincore.Span
|
||||
err := json.Unmarshal(jsonRaw, &spans)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error unmarshalling: %w", err)
|
||||
}
|
||||
|
||||
var zspans []*zipkincore.Span
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
zspans = append(zspans, spans...)
|
||||
|
||||
buf := thrift.NewTMemoryBuffer()
|
||||
transport := thrift.NewTBinaryProtocolConf(buf, nil)
|
||||
|
||||
if err = transport.WriteListBegin(context.Background(), thrift.STRUCT, len(spans)); err != nil {
|
||||
return nil, fmt.Errorf("error in beginning thrift write: %w", err)
|
||||
}
|
||||
|
||||
for _, span := range zspans {
|
||||
err = span.Write(context.Background(), transport)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error converting zipkin struct to thrift: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err = transport.WriteListEnd(context.Background()); err != nil {
|
||||
return nil, fmt.Errorf("error finishing thrift write: %w", err)
|
||||
}
|
||||
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
func thriftToJSONSpans(thriftData []byte) ([]byte, error) {
|
||||
buffer := thrift.NewTMemoryBuffer()
|
||||
buffer.Write(thriftData)
|
||||
|
||||
transport := thrift.NewTBinaryProtocolConf(buffer, nil)
|
||||
_, size, err := transport.ReadListBegin(context.Background())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error in ReadListBegin: %w", err)
|
||||
}
|
||||
|
||||
spans := make([]*zipkincore.Span, 0, size)
|
||||
for i := 0; i < size; i++ {
|
||||
zs := &zipkincore.Span{}
|
||||
if err = zs.Read(context.Background(), transport); err != nil {
|
||||
return nil, fmt.Errorf("error reading into zipkin struct: %w", err)
|
||||
}
|
||||
spans = append(spans, zs)
|
||||
}
|
||||
|
||||
err = transport.ReadListEnd(context.Background())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error ending thrift read: %w", err)
|
||||
}
|
||||
|
||||
return json.MarshalIndent(spans, "", " ")
|
||||
}
|
Loading…
Add table
Add a link
Reference in a new issue