 plugin/trace/README.md     |  8
 plugin/trace/setup.go      | 28
 plugin/trace/setup_test.go | 62
 plugin/trace/trace.go      | 40
 4 files changed, 105 insertions(+), 33 deletions(-)
diff --git a/plugin/trace/README.md b/plugin/trace/README.md
index 0a0d0e848..eac8a7ba7 100644
--- a/plugin/trace/README.md
+++ b/plugin/trace/README.md
@@ -32,6 +32,9 @@ trace [ENDPOINT-TYPE] [ENDPOINT] {
service NAME
client_server
datadog_analytics_rate RATE
+ zipkin_max_backlog_size SIZE
+ zipkin_max_batch_size SIZE
+ zipkin_max_batch_interval DURATION
}
~~~
@@ -43,6 +46,11 @@ trace [ENDPOINT-TYPE] [ENDPOINT] {
* `datadog_analytics_rate` **RATE** will enable [trace analytics](https://docs.datadoghq.com/tracing/app_analytics) on the traces sent
from *0* to *1*, *1* being every trace sent will be analyzed. This is a datadog only feature
(**ENDPOINT-TYPE** needs to be `datadog`)
+* `zipkin_max_backlog_size` **SIZE** configures the maximum backlog size of the Zipkin HTTP reporter. When the backlog reaches this threshold,
+  the oldest spans in the batch are discarded. The default backlog size is 1000.
+* `zipkin_max_batch_size` **SIZE** configures the maximum batch size of the Zipkin HTTP reporter, after which a collect is triggered. The default batch size is 100 traces.
+* `zipkin_max_batch_interval` **DURATION** configures the maximum duration the Zipkin HTTP reporter buffers traces before sending them to the collector.
+  The default batch interval is 1 second.
## Zipkin
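
Taken together, the new knobs are set inside the `trace` block of a server stanza. A minimal illustrative Corefile sketch (the values shown simply repeat the documented defaults):

~~~ corefile
. {
    trace zipkin localhost:9411 {
        zipkin_max_backlog_size 1000
        zipkin_max_batch_size 100
        zipkin_max_batch_interval 1s
    }
}
~~~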
diff --git a/plugin/trace/setup.go b/plugin/trace/setup.go
index abe5db0f7..8672dcc53 100644
--- a/plugin/trace/setup.go
+++ b/plugin/trace/setup.go
@@ -4,6 +4,7 @@ import (
"fmt"
"strconv"
"strings"
+ "time"
"github.com/coredns/caddy"
"github.com/coredns/coredns/core/dnsserver"
@@ -100,6 +101,33 @@ func traceParse(c *caddy.Controller) (*trace, error) {
if tr.datadogAnalyticsRate > 1 || tr.datadogAnalyticsRate < 0 {
return nil, fmt.Errorf("datadog analytics rate must be between 0 and 1, '%f' is not supported", tr.datadogAnalyticsRate)
}
+ case "zipkin_max_backlog_size":
+ args := c.RemainingArgs()
+ if len(args) != 1 {
+ return nil, c.ArgErr()
+ }
+ tr.zipkinMaxBacklogSize, err = strconv.Atoi(args[0])
+ if err != nil {
+ return nil, err
+ }
+ case "zipkin_max_batch_size":
+ args := c.RemainingArgs()
+ if len(args) != 1 {
+ return nil, c.ArgErr()
+ }
+ tr.zipkinMaxBatchSize, err = strconv.Atoi(args[0])
+ if err != nil {
+ return nil, err
+ }
+ case "zipkin_max_batch_interval":
+ args := c.RemainingArgs()
+ if len(args) != 1 {
+ return nil, c.ArgErr()
+ }
+ tr.zipkinMaxBatchInterval, err = time.ParseDuration(args[0])
+ if err != nil {
+ return nil, err
+ }
}
}
}
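
In the parser above, the SIZE values go through `strconv.Atoi` and the DURATION value through `time.ParseDuration`, so `zipkin_max_batch_interval` accepts standard Go duration strings. A small standalone sketch of which strings parse (sample values only):

~~~ go
package main

import (
	"fmt"
	"time"
)

func main() {
	// time.ParseDuration accepts strings such as "300ms", "10s", or "1m30s";
	// a bare number like "5" is rejected, which surfaces as a setup error.
	for _, s := range []string{"300ms", "10s", "1m30s", "5"} {
		d, err := time.ParseDuration(s)
		fmt.Printf("%q -> %v (err: %v)\n", s, d, err)
	}
}
~~~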
diff --git a/plugin/trace/setup_test.go b/plugin/trace/setup_test.go
index bbc5f987f..72de4ab16 100644
--- a/plugin/trace/setup_test.go
+++ b/plugin/trace/setup_test.go
@@ -2,35 +2,48 @@ package trace
import (
"testing"
+ "time"
"github.com/coredns/caddy"
)
func TestTraceParse(t *testing.T) {
tests := []struct {
- input string
- shouldErr bool
- endpoint string
- every uint64
- serviceName string
- clientServer bool
+ input string
+ shouldErr bool
+ endpoint string
+ every uint64
+ serviceName string
+ clientServer bool
+ zipkinMaxBacklogSize int
+ zipkinMaxBatchSize int
+ zipkinMaxBatchInterval time.Duration
}{
// oks
- {`trace`, false, "http://localhost:9411/api/v2/spans", 1, `coredns`, false},
- {`trace localhost:1234`, false, "http://localhost:1234/api/v2/spans", 1, `coredns`, false},
- {`trace http://localhost:1234/somewhere/else`, false, "http://localhost:1234/somewhere/else", 1, `coredns`, false},
- {`trace zipkin localhost:1234`, false, "http://localhost:1234/api/v2/spans", 1, `coredns`, false},
- {`trace datadog localhost`, false, "localhost", 1, `coredns`, false},
- {`trace datadog http://localhost:8127`, false, "http://localhost:8127", 1, `coredns`, false},
- {"trace datadog localhost {\n datadog_analytics_rate 0.1\n}", false, "localhost", 1, `coredns`, false},
- {"trace {\n every 100\n}", false, "http://localhost:9411/api/v2/spans", 100, `coredns`, false},
- {"trace {\n every 100\n service foobar\nclient_server\n}", false, "http://localhost:9411/api/v2/spans", 100, `foobar`, true},
- {"trace {\n every 2\n client_server true\n}", false, "http://localhost:9411/api/v2/spans", 2, `coredns`, true},
- {"trace {\n client_server false\n}", false, "http://localhost:9411/api/v2/spans", 1, `coredns`, false},
+ {`trace`, false, "http://localhost:9411/api/v2/spans", 1, `coredns`, false, 0, 0, 0},
+ {`trace localhost:1234`, false, "http://localhost:1234/api/v2/spans", 1, `coredns`, false, 0, 0, 0},
+ {`trace http://localhost:1234/somewhere/else`, false, "http://localhost:1234/somewhere/else", 1, `coredns`, false, 0, 0, 0},
+ {`trace zipkin localhost:1234`, false, "http://localhost:1234/api/v2/spans", 1, `coredns`, false, 0, 0, 0},
+ {`trace datadog localhost`, false, "localhost", 1, `coredns`, false, 0, 0, 0},
+ {`trace datadog http://localhost:8127`, false, "http://localhost:8127", 1, `coredns`, false, 0, 0, 0},
+ {"trace datadog localhost {\n datadog_analytics_rate 0.1\n}", false, "localhost", 1, `coredns`, false, 0, 0, 0},
+ {"trace {\n every 100\n}", false, "http://localhost:9411/api/v2/spans", 100, `coredns`, false, 0, 0, 0},
+ {"trace {\n every 100\n service foobar\nclient_server\n}", false, "http://localhost:9411/api/v2/spans", 100, `foobar`, true, 0, 0, 0},
+ {"trace {\n every 2\n client_server true\n}", false, "http://localhost:9411/api/v2/spans", 2, `coredns`, true, 0, 0, 0},
+ {"trace {\n client_server false\n}", false, "http://localhost:9411/api/v2/spans", 1, `coredns`, false, 0, 0, 0},
+ {"trace {\n zipkin_max_backlog_size 100\n zipkin_max_batch_size 200\n zipkin_max_batch_interval 10s\n}", false,
+ "http://localhost:9411/api/v2/spans", 1, `coredns`, false, 100, 200, 10 * time.Second},
+
// fails
- {`trace footype localhost:4321`, true, "", 1, "", false},
- {"trace {\n every 2\n client_server junk\n}", true, "", 1, "", false},
- {"trace datadog localhost {\n datadog_analytics_rate 2\n}", true, "", 1, "", false},
+ {`trace footype localhost:4321`, true, "", 1, "", false, 0, 0, 0},
+ {"trace {\n every 2\n client_server junk\n}", true, "", 1, "", false, 0, 0, 0},
+ {"trace datadog localhost {\n datadog_analytics_rate 2\n}", true, "", 1, "", false, 0, 0, 0},
+ {"trace {\n zipkin_max_backlog_size wrong\n}", true, "", 1, `coredns`, false, 0, 0, 0},
+ {"trace {\n zipkin_max_batch_size wrong\n}", true, "", 1, `coredns`, false, 0, 0, 0},
+ {"trace {\n zipkin_max_batch_interval wrong\n}", true, "", 1, `coredns`, false, 0, 0, 0},
+ {"trace {\n zipkin_max_backlog_size\n}", true, "", 1, `coredns`, false, 0, 0, 0},
+ {"trace {\n zipkin_max_batch_size\n}", true, "", 1, `coredns`, false, 0, 0, 0},
+ {"trace {\n zipkin_max_batch_interval\n}", true, "", 1, `coredns`, false, 0, 0, 0},
}
for i, test := range tests {
c := caddy.NewTestController("dns", test.input)
@@ -62,5 +75,14 @@ func TestTraceParse(t *testing.T) {
if test.clientServer != m.clientServer {
t.Errorf("Test %v: Expected client_server %t but found: %t", i, test.clientServer, m.clientServer)
}
+ if test.zipkinMaxBacklogSize != m.zipkinMaxBacklogSize {
+ t.Errorf("Test %v: Expected zipkin_max_backlog_size %d but found: %d", i, test.zipkinMaxBacklogSize, m.zipkinMaxBacklogSize)
+ }
+ if test.zipkinMaxBatchSize != m.zipkinMaxBatchSize {
+ t.Errorf("Test %v: Expected zipkin_max_batch_size %d but found: %d", i, test.zipkinMaxBatchSize, m.zipkinMaxBatchSize)
+ }
+ if test.zipkinMaxBatchInterval != m.zipkinMaxBatchInterval {
+ t.Errorf("Test %v: Expected zipkin_max_batch_interval %v but found: %v", i, test.zipkinMaxBatchInterval, m.zipkinMaxBatchInterval)
+ }
}
}
diff --git a/plugin/trace/trace.go b/plugin/trace/trace.go
index 5c69b9249..f7409679d 100644
--- a/plugin/trace/trace.go
+++ b/plugin/trace/trace.go
@@ -8,6 +8,7 @@ import (
"net/http"
"sync"
"sync/atomic"
+ "time"
"github.com/coredns/coredns/core/dnsserver"
"github.com/coredns/coredns/plugin"
@@ -66,17 +67,20 @@ var tagByProvider = map[string]traceTags{
type trace struct {
count uint64 // as per Go spec, needs to be first element in a struct
- Next plugin.Handler
- Endpoint string
- EndpointType string
- tracer ot.Tracer
- serviceEndpoint string
- serviceName string
- clientServer bool
- every uint64
- datadogAnalyticsRate float64
- Once sync.Once
- tagSet traceTags
+ Next plugin.Handler
+ Endpoint string
+ EndpointType string
+ tracer ot.Tracer
+ serviceEndpoint string
+ serviceName string
+ clientServer bool
+ every uint64
+ datadogAnalyticsRate float64
+ zipkinMaxBacklogSize int
+ zipkinMaxBatchSize int
+ zipkinMaxBatchInterval time.Duration
+ Once sync.Once
+ tagSet traceTags
}
func (t *trace) Tracer() ot.Tracer {
@@ -109,8 +113,18 @@ func (t *trace) OnStartup() error {
}
func (t *trace) setupZipkin() error {
- logOpt := zipkinhttp.Logger(stdlog.New(&loggerAdapter{log}, "", 0))
- reporter := zipkinhttp.NewReporter(t.Endpoint, logOpt)
+ var opts []zipkinhttp.ReporterOption
+ opts = append(opts, zipkinhttp.Logger(stdlog.New(&loggerAdapter{log}, "", 0)))
+ if t.zipkinMaxBacklogSize != 0 {
+ opts = append(opts, zipkinhttp.MaxBacklog(t.zipkinMaxBacklogSize))
+ }
+ if t.zipkinMaxBatchSize != 0 {
+ opts = append(opts, zipkinhttp.BatchSize(t.zipkinMaxBatchSize))
+ }
+ if t.zipkinMaxBatchInterval != 0 {
+ opts = append(opts, zipkinhttp.BatchInterval(t.zipkinMaxBatchInterval))
+ }
+ reporter := zipkinhttp.NewReporter(t.Endpoint, opts...)
recorder, err := zipkin.NewEndpoint(t.serviceName, t.serviceEndpoint)
if err != nil {
log.Warningf("build Zipkin endpoint found err: %v", err)
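
For reference, the same reporter options can be exercised outside the plugin. A minimal standalone sketch against `github.com/openzipkin/zipkin-go` (not CoreDNS code; the collector URL, `host:port`, and option values are illustrative and mirror the defaults documented above):

~~~ go
package main

import (
	"log"
	"time"

	zipkin "github.com/openzipkin/zipkin-go"
	zipkinhttp "github.com/openzipkin/zipkin-go/reporter/http"
)

func main() {
	reporter := zipkinhttp.NewReporter(
		"http://localhost:9411/api/v2/spans",
		zipkinhttp.MaxBacklog(1000),             // oldest spans are dropped once the backlog exceeds this size
		zipkinhttp.BatchSize(100),               // flush once this many spans are buffered...
		zipkinhttp.BatchInterval(1*time.Second), // ...or after this interval, whichever comes first
	)
	defer reporter.Close()

	endpoint, err := zipkin.NewEndpoint("coredns", "localhost:0")
	if err != nil {
		log.Fatalf("creating local endpoint: %v", err)
	}
	tracer, err := zipkin.NewTracer(reporter, zipkin.WithLocalEndpoint(endpoint))
	if err != nil {
		log.Fatalf("creating tracer: %v", err)
	}

	span := tracer.StartSpan("example")
	span.Finish() // the finished span is queued in the reporter's batch
}
~~~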