path: root/plugin/kubernetes/kubernetes.go
author    Miek Gieben <miek@miek.nl>    2017-10-15 19:38:39 +0200
committer GitHub <noreply@github.com>   2017-10-15 19:38:39 +0200
commit    e34e2c251f236934b0d1928d521c35305dd3f389 (patch)
tree      fafc78d88825ab244942aabe6b8f14cc183d185d /plugin/kubernetes/kubernetes.go
parent    c7ff44fb3a4ac782108980b7d91803f341ec1614 (diff)
plugin/proxy: kick off HC on every 3rd failure (#1110)
* healthchecks: check on every 3rd failure

Check on every third failure, with some cleanups to make this possible. A failed health check will never increase Fails; a successful health check resets Fails to 0. There is a chance this counter now drops below 0, making the upstream "super" healthy. This removes the okUntil smartness and condenses everything back to one metric, Fails, so it's simpler in that regard. Timeout errors are *not* attributed to the local upstream and no longer count towards Fails, meaning that 'dig any isc.org' won't kill your upstream. Added an extra test to see if the Fails counter gets reset after 3 failed connections. There is still a disconnect between the HTTP health check working and the proxy (or lookup) not being able to connect to the upstream.

* Fix tests
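
For illustration only, a minimal Go sketch of the counting behaviour the commit message describes. UpstreamHost, failure, and success here are hypothetical stand-ins rather than the plugin's real API; the threshold of 3 mirrors the "every 3rd failure" rule.

package sketch

import "sync/atomic"

// UpstreamHost is a pared-down stand-in for healthcheck.UpstreamHost;
// only the Fails counter discussed in the commit message is modelled.
type UpstreamHost struct {
	Fails int32
}

// failure records one failed exchange with the upstream (a failed proxy
// request, not a failed health check) and reports whether an out-of-band
// health check should be kicked off now, i.e. on every 3rd consecutive failure.
func failure(uh *UpstreamHost) (checkNow bool) {
	fails := atomic.AddInt32(&uh.Fails, 1)
	return fails%3 == 0
}

// success resets the failure counter, as the commit message says a
// successful health check does.
func success(uh *UpstreamHost) {
	atomic.StoreInt32(&uh.Fails, 0)
}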
Diffstat (limited to 'plugin/kubernetes/kubernetes.go')
-rw-r--r--  plugin/kubernetes/kubernetes.go | 15
1 file changed, 2 insertions(+), 13 deletions(-)
diff --git a/plugin/kubernetes/kubernetes.go b/plugin/kubernetes/kubernetes.go
index 211d2573c..d8173769c 100644
--- a/plugin/kubernetes/kubernetes.go
+++ b/plugin/kubernetes/kubernetes.go
@@ -175,7 +175,6 @@ func (k *Kubernetes) getClientConfig() (*rest.Config, error) {
HealthCheck: healthcheck.HealthCheck{
FailTimeout: 3 * time.Second,
MaxFails: 1,
- Future: 10 * time.Second,
Path: "/",
Interval: 5 * time.Second,
},
@@ -190,21 +189,11 @@ func (k *Kubernetes) getClientConfig() (*rest.Config, error) {
CheckDown: func(upstream *proxyHandler) healthcheck.UpstreamHostDownFunc {
return func(uh *healthcheck.UpstreamHost) bool {
- down := false
-
- uh.Lock()
- until := uh.OkUntil
- uh.Unlock()
-
- if !until.IsZero() && time.Now().After(until) {
- down = true
- }
-
fails := atomic.LoadInt32(&uh.Fails)
if fails >= upstream.MaxFails && upstream.MaxFails != 0 {
- down = true
+ return true
}
- return down
+ return false
}
}(&k.APIProxy.handler),
}
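
Read outside the diff, the simplified CheckDown reduces to a single comparison against the Fails counter. Continuing the sketch above (same package and UpstreamHost stand-in, with maxFails standing in for upstream.MaxFails), it could be written as:

// checkDown mirrors the post-patch closure: a host is reported down once
// its Fails counter reaches maxFails, and maxFails == 0 disables the check.
func checkDown(maxFails int32) func(uh *UpstreamHost) bool {
	return func(uh *UpstreamHost) bool {
		fails := atomic.LoadInt32(&uh.Fails)
		return maxFails != 0 && fails >= maxFails
	}
}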