aboutsummaryrefslogtreecommitdiff
path: root/middleware/pkg/cache/cache.go
diff options
context:
space:
mode:
authorGravatar Miek Gieben <miek@miek.nl> 2017-06-13 12:39:10 -0700
committerGravatar GitHub <noreply@github.com> 2017-06-13 12:39:10 -0700
commite9eda7e7c8ed75d62b02f23c62e8e318ea1685ae (patch)
tree152a04673698e1a39caa751c02612d4e21315662 /middleware/pkg/cache/cache.go
parentb1efd3736e6e68ab01baf54f83071c62690899b2 (diff)
downloadcoredns-e9eda7e7c8ed75d62b02f23c62e8e318ea1685ae.tar.gz
coredns-e9eda7e7c8ed75d62b02f23c62e8e318ea1685ae.tar.zst
coredns-e9eda7e7c8ed75d62b02f23c62e8e318ea1685ae.zip
New cache implementation and prefetch handling in mw/cache (#731)
* cache: add sharded cache implementation Add Cache impl and a few tests. This cache is 256-way sharded, mainly so each shard has its own lock. The main cache structure is a readonly jump plane into the right shard. This should remove the single lock contention on the main lock and provide more concurrent throughput - Obviously this hasn't been tested or measured. The key into the cache was made a uint32 (hash.fnv) and the hashing op is not using strings.ToLower anymore, removing any GC in that code path. * here too * Minimum shard size * typos * blurp * small cleanups no defer * typo * Add freq based on Johns idea * cherry-pick conflict resolution * typo * update from early code review from john * add prefetch to the cache * mw/cache: add prefetch * remove println * remove comment * Fix tests * Test prefetch in setup * Add start of cache * try add diff cache options * Add hacky testcase * not needed * allow the use of a percentage for prefetch If the TTL falls below xx% do a prefetch, if the record was popular. Some other fixes and correctly prefetch only popular records.
Diffstat (limited to 'middleware/pkg/cache/cache.go')
-rw-r--r--middleware/pkg/cache/cache.go129
1 files changed, 129 insertions, 0 deletions
diff --git a/middleware/pkg/cache/cache.go b/middleware/pkg/cache/cache.go
new file mode 100644
index 000000000..56cae2180
--- /dev/null
+++ b/middleware/pkg/cache/cache.go
@@ -0,0 +1,129 @@
+// Package cache implements a cache. The cache holds 256 shards, each shard
+// holds a cache: a map with a mutex. There is no fancy expunge algorithm, it
+// just randomly evicts elements when it gets full.
+package cache
+
+import (
+ "hash/fnv"
+ "sync"
+)
+
// Hash returns the 32-bit FNV-1 hash of what.
func Hash(what []byte) uint32 {
	hasher := fnv.New32()
	hasher.Write(what) // hash.Hash documents that Write never returns an error
	return hasher.Sum32()
}
+
// Cache is a 256-way sharded cache. Keys are spread over the shards by
// their low bits (key & (shardSize - 1)), so each shard carries its own
// lock and there is no single contended mutex for the whole cache.
type Cache struct {
	shards [shardSize]*shard // read-only jump plane into the right shard
}
+
// shard is a cache with random eviction: a plain map guarded by the
// embedded RWMutex. When a shard reaches size elements, Add evicts a
// random element to make room.
type shard struct {
	items map[uint32]interface{} // stored elements, keyed by their (FNV) hash
	size  int                    // maximum number of elements this shard holds

	sync.RWMutex
}
+
+// New returns a new cache.
+func New(size int) *Cache {
+ ssize := size / shardSize
+ if ssize < 512 {
+ ssize = 512
+ }
+
+ c := &Cache{}
+
+ // Initialize all the shards
+ for i := 0; i < shardSize; i++ {
+ c.shards[i] = newShard(ssize)
+ }
+ return c
+}
+
+// Add adds a new element to the cache. If the element already exists it is overwritten.
+func (c *Cache) Add(key uint32, el interface{}) {
+ shard := key & (shardSize - 1)
+ c.shards[shard].Add(key, el)
+}
+
+// Get looks up element index under key.
+func (c *Cache) Get(key uint32) (interface{}, bool) {
+ shard := key & (shardSize - 1)
+ return c.shards[shard].Get(key)
+}
+
+// Remove removes the element indexed with key.
+func (c *Cache) Remove(key uint32) {
+ shard := key & (shardSize - 1)
+ c.shards[shard].Remove(key)
+}
+
+// Len returns the number of elements in the cache.
+func (c *Cache) Len() int {
+ l := 0
+ for _, s := range c.shards {
+ l += s.Len()
+ }
+ return l
+}
+
+// newShard returns a new shard with size.
+func newShard(size int) *shard { return &shard{items: make(map[uint32]interface{}), size: size} }
+
+// Add adds element indexed by key into the cache. Any existing element is overwritten
+func (s *shard) Add(key uint32, el interface{}) {
+ l := s.Len()
+ if l+1 > s.size {
+ s.Evict()
+ }
+
+ s.Lock()
+ s.items[key] = el
+ s.Unlock()
+}
+
+// Remove removes the element indexed by key from the cache.
+func (s *shard) Remove(key uint32) {
+ s.Lock()
+ delete(s.items, key)
+ s.Unlock()
+}
+
+// Evict removes a random element from the cache.
+func (s *shard) Evict() {
+ s.Lock()
+ defer s.Unlock()
+
+ key := -1
+ for k := range s.items {
+ key = int(k)
+ break
+ }
+ if key == -1 {
+ // empty cache
+ return
+ }
+ delete(s.items, uint32(key))
+}
+
+// Get looks up the element indexed under key.
+func (s *shard) Get(key uint32) (interface{}, bool) {
+ s.RLock()
+ el, found := s.items[key]
+ s.RUnlock()
+ return el, found
+}
+
+// Len returns the current length of the cache.
+func (s *shard) Len() int {
+ s.RLock()
+ l := len(s.items)
+ s.RUnlock()
+ return l
+}
+
// shardSize is the number of shards the cache is split into. It must be a
// power of two because Add, Get and Remove select a shard with
// key & (shardSize - 1).
const shardSize = 256