diff options
author | 2017-06-13 12:39:10 -0700 | |
---|---|---|
committer | 2017-06-13 12:39:10 -0700 | |
commit | e9eda7e7c8ed75d62b02f23c62e8e318ea1685ae (patch) | |
tree | 152a04673698e1a39caa751c02612d4e21315662 /middleware/cache/handler.go | |
parent | b1efd3736e6e68ab01baf54f83071c62690899b2 (diff) | |
download | coredns-e9eda7e7c8ed75d62b02f23c62e8e318ea1685ae.tar.gz coredns-e9eda7e7c8ed75d62b02f23c62e8e318ea1685ae.tar.zst coredns-e9eda7e7c8ed75d62b02f23c62e8e318ea1685ae.zip |
New cache implementation and prefetch handing in mw/cache (#731)
* cache: add sharded cache implementation
Add Cache impl and a few tests. This cache is 256-way sharded, mainly
so each shard has its own lock. The main cache structure is a readonly
jump plane into the right shard.
This should remove the single lock contention on the main lock and
provide more concurrent throughput - Obviously this hasn't been tested
or measured.
The key into the cache was made a uint32 (hash.fnv) and the hashing op
is not using strings.ToLower anymore, removing any GC in that code path.
* here too
* Minimum shard size
* typos
* blurp
* small cleanups no defer
* typo
* Add freq based on Johns idea
* cherry-pick conflict resolv
* typo
* update from early code review from john
* add prefetch to the cache
* mw/cache: add prefetch
* remove println
* remove comment
* Fix tests
* Test prefetch in setup
* Add start of cache
* try add diff cache options
* Add hacky testcase
* not needed
* allow the use of a percentage for prefetch
If the TTL falls below xx%, do a prefetch if the record was popular.
Some other fixes and correctly prefetch only popular records.
Diffstat (limited to 'middleware/cache/handler.go')
-rw-r--r-- | middleware/cache/handler.go | 36 |
1 files changed, 29 insertions, 7 deletions
diff --git a/middleware/cache/handler.go b/middleware/cache/handler.go index 195322e31..520b23767 100644 --- a/middleware/cache/handler.go +++ b/middleware/cache/handler.go @@ -24,36 +24,58 @@ func (c *Cache) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) do := state.Do() // TODO(): might need more from OPT record? Like the actual bufsize? - if i, ok, expired := c.get(qname, qtype, do); ok && !expired { + now := time.Now().UTC() + + i, ttl := c.get(now, qname, qtype, do) + if i != nil && ttl > 0 { resp := i.toMsg(r) state.SizeAndDo(resp) resp, _ = state.Scrub(resp) w.WriteMsg(resp) + i.Freq.Update(c.duration, now) + + pct := 100 + if i.origTTL != 0 { // you'll never know + pct = int(float64(ttl) / float64(i.origTTL) * 100) + } + + if c.prefetch > 0 && i.Freq.Hits() > c.prefetch && pct < c.percentage { + // When prefetching we loose the item i, and with it the frequency + // that we've gathered sofar. See we copy the frequence info back + // into the new item that was stored in the cache. + prr := &ResponseWriter{ResponseWriter: w, Cache: c, prefetch: true} + middleware.NextOrFailure(c.Name(), c.Next, ctx, prr, r) + + if i1, _ := c.get(now, qname, qtype, do); i1 != nil { + i1.Freq.Reset(now, i.Freq.Hits()) + } + } + return dns.RcodeSuccess, nil } - crr := &ResponseWriter{w, c} + crr := &ResponseWriter{ResponseWriter: w, Cache: c} return middleware.NextOrFailure(c.Name(), c.Next, ctx, crr, r) } // Name implements the Handler interface. 
func (c *Cache) Name() string { return "cache" } -func (c *Cache) get(qname string, qtype uint16, do bool) (*item, bool, bool) { - k := rawKey(qname, qtype, do) +func (c *Cache) get(now time.Time, qname string, qtype uint16, do bool) (*item, int) { + k := hash(qname, qtype, do) if i, ok := c.ncache.Get(k); ok { cacheHits.WithLabelValues(Denial).Inc() - return i.(*item), ok, i.(*item).expired(time.Now()) + return i.(*item), i.(*item).ttl(now) } if i, ok := c.pcache.Get(k); ok { cacheHits.WithLabelValues(Success).Inc() - return i.(*item), ok, i.(*item).expired(time.Now()) + return i.(*item), i.(*item).ttl(now) } cacheMisses.Inc() - return nil, false, false + return nil, 0 } var ( |