Diffstat (limited to 'middleware/kubernetes')
-rw-r--r--  middleware/kubernetes/README.md                       26
-rw-r--r--  middleware/kubernetes/controller.go                  195
-rw-r--r--  middleware/kubernetes/handler.go                      25
-rw-r--r--  middleware/kubernetes/kubernetes.go                  173
-rw-r--r--  middleware/kubernetes/lookup.go                       65
-rw-r--r--  middleware/kubernetes/msg/service.go                   5
-rwxr-xr-x  middleware/kubernetes/test/20_setup_k8s_services.sh    2
-rw-r--r--  middleware/kubernetes/util/util.go                    16
8 files changed, 382 insertions(+), 125 deletions(-)
diff --git a/middleware/kubernetes/README.md b/middleware/kubernetes/README.md
index 66cee605e..6d8006404 100644
--- a/middleware/kubernetes/README.md
+++ b/middleware/kubernetes/README.md
@@ -276,11 +276,13 @@ TBD:
* Update kubernetes middleware documentation to describe running CoreDNS as a
SkyDNS replacement. (Include descriptions of different ways to pass CoreFile
to coredns command.)
+ * Remove dependency on healthz for health checking in the
+ `kubernetes-rc.yaml` file.
* Expose load-balancer IP addresses.
* Calculate SRV priority based on number of instances running.
(See SkyDNS README.md)
* Functional work
- * (done) ~~Implement wildcard-based lookup. Minimally support `*`, consider `?` as well.~~
+ * (done; `?` not supported yet) ~~Implement wildcard-based lookup. Minimally support `*`, consider `?` as well.~~
* (done) ~~Note from Miek on PR 181: "SkyDNS also supports the word `any`.~~
* Implement SkyDNS-style synthetic zones such as "svc" to group k8s objects. (This
should be optional behavior.) Also look at "pod" synthetic zones.
@@ -303,17 +305,14 @@ TBD:
* Performance
* Improve lookup to reduce size of query result obtained from k8s API.
(namespace-based?, other ideas?)
- * Caching of k8s API dataset.
+ * Caching/notification of k8s API dataset. (See aledbf fork for
+ implementation ideas.)
* DNS response caching is good, but we should also cache at the http query
level as well. (Take a look at https://github.com/patrickmn/go-cache as
a potential expiring cache implementation for the http API queries.)
- * Push notifications from k8s for data changes rather than pull via API?
* Additional features:
- * Implement namespace filtering to different zones. That is, zone "a.b"
- publishes services from namespace "foo", and zone "x.y" publishes services
- from namespaces "bar" and "baz". (Basic version implemented -- need test cases.)
* Reverse IN-ADDR entries for services. (Is there any value in supporting
- reverse lookup records?
+ reverse lookup records?)
* How to support label specification in Corefile to allow use of labels to
indicate zone? (Is this even useful?) For example, the following
configuration exposes all services labeled for the "staging" environment
@@ -334,11 +333,14 @@ TBD:
flattening to lower case and mapping of non-DNS characters to DNS characters
in a standard way.)
* Expose arbitrary kubernetes repository data as TXT records?
- * Support custom user-provided templates for k8s names. A string provided
+ * (done) ~~Support custom user-provided templates for k8s names. A string provided
in the middleware configuration like `{service}.{namespace}.{type}` defines
the template of how to construct record names for the zone. This example
would produce `myservice.mynamespace.svc.cluster.local`. (Basic template
- implemented. Need to slice zone out of current template implementation.)
+ implemented. Need to slice zone out of current template implementation.)~~
+ * (done) ~~Implement namespace filtering to different zones. That is, zone "a.b"
+ publishes services from namespace "foo", and zone "x.y" publishes services
+ from namespaces "bar" and "baz". (Basic version implemented -- need test cases.)~~
* DNS Correctness
* Do we need to generate synthetic zone records for namespaces?
* Do we need to generate synthetic zone records for the skydns synthetic zones?
@@ -347,10 +349,10 @@ TBD:
using the `cache` directive. Tested working using 20s cache timeout
and A-record queries. Automate testing with cache in place.
* Automate CoreDNS performance tests. Initially for zone files, and for
- pre-loaded k8s API cache.
+ pre-loaded k8s API cache. With and without CoreDNS response caching.
* Try to get rid of kubernetes launch scripts by moving operations into
.travis.yml file.
* ~~Implement test cases for http data parsing using dependency injection
for http get operations.~~
- * ~~Automate integration testing with kubernetes. (k8s launch and service start-up
- automation is in middleware/kubernetes/tests)~~
+ * ~~Automate integration testing with kubernetes. (k8s launch and service
+ start-up automation is in middleware/kubernetes/tests)~~
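
The namespace-filtering and name-template items above both concern Corefile configuration. As a minimal sketch only -- the `namespaces` and `template` directive names and the block layout are assumptions inferred from those items, not confirmed by this diff -- a zone-per-namespace setup along the lines described might look like:

```
# Hypothetical Corefile sketch; directive names are assumptions, not taken from this diff.
a.b {
    # zone "a.b" publishes only services from namespace "foo"
    kubernetes a.b {
        namespaces foo
        # record names follow {service}.{namespace}.{type}, e.g. myservice.foo.svc.a.b
        template {service}.{namespace}.{type}
    }
}
x.y {
    # zone "x.y" publishes services from namespaces "bar" and "baz"
    kubernetes x.y {
        namespaces bar baz
    }
}
```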
diff --git a/middleware/kubernetes/controller.go b/middleware/kubernetes/controller.go
new file mode 100644
index 000000000..6c94bdae1
--- /dev/null
+++ b/middleware/kubernetes/controller.go
@@ -0,0 +1,195 @@
+package kubernetes
+
+import (
+ "fmt"
+ "log"
+ "sync"
+ "time"
+
+ "github.com/miekg/coredns/middleware/kubernetes/util"
+
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/client/cache"
+ client "k8s.io/kubernetes/pkg/client/unversioned"
+ "k8s.io/kubernetes/pkg/controller/framework"
+ "k8s.io/kubernetes/pkg/runtime"
+ "k8s.io/kubernetes/pkg/watch"
+)
+
+var (
+ namespace = api.NamespaceAll
+)
+
+type dnsController struct {
+ client *client.Client
+
+ endpController *framework.Controller
+ svcController *framework.Controller
+ nsController *framework.Controller
+
+ svcLister cache.StoreToServiceLister
+ endpLister cache.StoreToEndpointsLister
+ nsLister util.StoreToNamespaceLister
+
+ // stopLock is used to enforce only a single call to Stop is active.
+ // Needed because we allow stopping through an http endpoint and
+ // allowing concurrent stoppers leads to stack traces.
+ stopLock sync.Mutex
+ shutdown bool
+ stopCh chan struct{}
+}
+
+// newdnsController creates a controller for CoreDNS.
+func newdnsController(kubeClient *client.Client, resyncPeriod time.Duration) *dnsController {
+ dns := dnsController{
+ client: kubeClient,
+ stopCh: make(chan struct{}),
+ }
+
+ dns.endpLister.Store, dns.endpController = framework.NewInformer(
+ &cache.ListWatch{
+ ListFunc: endpointsListFunc(dns.client, namespace),
+ WatchFunc: endpointsWatchFunc(dns.client, namespace),
+ },
+ &api.Endpoints{}, resyncPeriod, framework.ResourceEventHandlerFuncs{})
+
+ dns.svcLister.Store, dns.svcController = framework.NewInformer(
+ &cache.ListWatch{
+ ListFunc: serviceListFunc(dns.client, namespace),
+ WatchFunc: serviceWatchFunc(dns.client, namespace),
+ },
+ &api.Service{}, resyncPeriod, framework.ResourceEventHandlerFuncs{})
+
+ dns.nsLister.Store, dns.nsController = framework.NewInformer(
+ &cache.ListWatch{
+ ListFunc: namespaceListFunc(dns.client),
+ WatchFunc: namespaceWatchFunc(dns.client),
+ },
+ &api.Namespace{}, resyncPeriod, framework.ResourceEventHandlerFuncs{})
+
+ return &dns
+}
+
+func serviceListFunc(c *client.Client, ns string) func(api.ListOptions) (runtime.Object, error) {
+ return func(opts api.ListOptions) (runtime.Object, error) {
+ return c.Services(ns).List(opts)
+ }
+}
+
+func serviceWatchFunc(c *client.Client, ns string) func(options api.ListOptions) (watch.Interface, error) {
+ return func(options api.ListOptions) (watch.Interface, error) {
+ return c.Services(ns).Watch(options)
+ }
+}
+
+func endpointsListFunc(c *client.Client, ns string) func(api.ListOptions) (runtime.Object, error) {
+ return func(opts api.ListOptions) (runtime.Object, error) {
+ return c.Endpoints(ns).List(opts)
+ }
+}
+
+func endpointsWatchFunc(c *client.Client, ns string) func(options api.ListOptions) (watch.Interface, error) {
+ return func(options api.ListOptions) (watch.Interface, error) {
+ return c.Endpoints(ns).Watch(options)
+ }
+}
+
+func namespaceListFunc(c *client.Client) func(api.ListOptions) (runtime.Object, error) {
+ return func(opts api.ListOptions) (runtime.Object, error) {
+ return c.Namespaces().List(opts)
+ }
+}
+
+func namespaceWatchFunc(c *client.Client) func(options api.ListOptions) (watch.Interface, error) {
+ return func(options api.ListOptions) (watch.Interface, error) {
+ return c.Namespaces().Watch(options)
+ }
+}
+
+func (dns *dnsController) controllersInSync() bool {
+ return dns.svcController.HasSynced() && dns.endpController.HasSynced()
+}
+
+// Stop stops the controller.
+func (dns *dnsController) Stop() error {
+ dns.stopLock.Lock()
+ defer dns.stopLock.Unlock()
+
+ // Only try draining the workqueue if we haven't already.
+ if !dns.shutdown {
+ close(dns.stopCh)
+ log.Println("shutting down controller queues")
+ dns.shutdown = true
+
+ return nil
+ }
+
+ return fmt.Errorf("shutdown already in progress")
+}
+
+// Run starts the controller.
+func (dns *dnsController) Run() {
+ log.Println("[debug] starting coredns controller")
+
+ go dns.endpController.Run(dns.stopCh)
+ go dns.svcController.Run(dns.stopCh)
+ go dns.nsController.Run(dns.stopCh)
+
+ <-dns.stopCh
+ log.Println("[debug] shutting down coredns controller")
+}
+
+func (dns *dnsController) GetNamespaceList() *api.NamespaceList {
+ nsList, err := dns.nsLister.List()
+ if err != nil {
+ return &api.NamespaceList{}
+ }
+
+ return &nsList
+}
+
+func (dns *dnsController) GetServiceList() *api.ServiceList {
+ log.Printf("[debug] here in GetServiceList")
+ svcList, err := dns.svcLister.List()
+ if err != nil {
+ return &api.ServiceList{}
+ }
+
+ return &svcList
+}
+
+// GetServicesByNamespace returns a map of
+// namespacename :: [ kubernetesService ]
+func (dns *dnsController) GetServicesByNamespace() map[string][]api.Service {
+ k8sServiceList := dns.GetServiceList()
+ if k8sServiceList == nil {
+ return nil
+ }
+
+ items := make(map[string][]api.Service, len(k8sServiceList.Items))
+ for _, i := range k8sServiceList.Items {
+ namespace := i.Namespace
+ items[namespace] = append(items[namespace], i)
+ }
+
+ return items
+}
+
+// GetServiceInNamespace returns the Service that matches
+// servicename in the namespace
+func (dns *dnsController) GetServiceInNamespace(namespace string, servicename string) *api.Service {
+ svcKey := fmt.Sprintf("%v/%v", namespace, servicename)
+ svcObj, svcExists, err := dns.svcLister.Store.GetByKey(svcKey)
+
+ if err != nil {
+ log.Printf("error getting service %v from the cache: %v\n", svcKey, err)
+ return nil
+ }
+
+ if !svcExists {
+ log.Printf("service %v does not exists\n", svcKey)
+ return nil
+ }
+
+ return svcObj.(*api.Service)
+}
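
controller.go replaces per-query HTTP calls with informer-backed local stores: framework.NewInformer list/watches Services, Endpoints and Namespaces and keeps the listers in sync. The sketch below is not part of this change; assuming only the same vendored k8s.io/kubernetes packages, it shows where per-event hooks could be attached in place of the empty `framework.ResourceEventHandlerFuncs{}` literals, e.g. to log service churn.

```go
package kubernetes

// Sketch only (not part of this change): handlers that could replace the
// empty framework.ResourceEventHandlerFuncs{} passed to framework.NewInformer
// for the service informer.

import (
	"log"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/controller/framework"
)

func serviceEventHandlers() framework.ResourceEventHandlerFuncs {
	return framework.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			if svc, ok := obj.(*api.Service); ok {
				log.Printf("[debug] service added: %s/%s", svc.Namespace, svc.Name)
			}
		},
		DeleteFunc: func(obj interface{}) {
			if svc, ok := obj.(*api.Service); ok {
				log.Printf("[debug] service deleted: %s/%s", svc.Namespace, svc.Name)
			}
		},
	}
}
```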
diff --git a/middleware/kubernetes/handler.go b/middleware/kubernetes/handler.go
index a78873426..05dfba934 100644
--- a/middleware/kubernetes/handler.go
+++ b/middleware/kubernetes/handler.go
@@ -3,6 +3,7 @@ package kubernetes
import (
"fmt"
"log"
+ "strings"
"github.com/miekg/coredns/middleware"
@@ -18,6 +19,26 @@ func (k Kubernetes) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.M
return dns.RcodeServerFailure, fmt.Errorf("can only deal with ClassINET")
}
+ m := new(dns.Msg)
+ m.SetReply(r)
+ m.Authoritative, m.RecursionAvailable, m.Compress = true, true, true
+
+ // TODO: find an alternative to this block
+ if strings.HasSuffix(state.Name(), arpaSuffix) {
+ ip, _ := extractIP(state.Name())
+ records := k.getServiceRecordForIP(ip, state.Name())
+ if len(records) > 0 {
+ srvPTR := &records[0]
+ m.Answer = append(m.Answer, srvPTR.NewPTR(state.QName(), ip))
+
+ m = dedup(m)
+ state.SizeAndDo(m)
+ m, _ = state.Scrub(m)
+ w.WriteMsg(m)
+ return dns.RcodeSuccess, nil
+ }
+ }
+
// Check that query matches one of the zones served by this middleware,
// otherwise delegate to the next in the pipeline.
zone := middleware.Zones(k.Zones).Matches(state.Name())
@@ -28,10 +49,6 @@ func (k Kubernetes) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.M
return k.Next.ServeDNS(ctx, w, r)
}
- m := new(dns.Msg)
- m.SetReply(r)
- m.Authoritative, m.RecursionAvailable, m.Compress = true, true, true
-
var (
records, extra []dns.RR
err error
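
The new block in ServeDNS intercepts reverse-lookup names before zone matching. Below is a small sketch of the kind of query that now takes this path; the service IP 10.3.0.100 is a made-up example value, and dns.ReverseAddr is used only to build the in-addr.arpa name.

```go
package main

// Sketch only: builds the kind of reverse-lookup query that the new
// arpaSuffix branch in ServeDNS answers from the service cache via
// getServiceRecordForIP. The IP is a made-up example value.

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	// dns.ReverseAddr turns an IP into its PTR lookup name.
	name, err := dns.ReverseAddr("10.3.0.100")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(name) // 100.0.3.10.in-addr.arpa.

	// A PTR query for this name matches arpaSuffix and is answered before
	// the usual zone matching and record lookup.
	m := new(dns.Msg)
	m.SetQuestion(name, dns.TypePTR)
	fmt.Println(m.Question[0].Name, dns.TypeToString[m.Question[0].Qtype])
}
```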
diff --git a/middleware/kubernetes/kubernetes.go b/middleware/kubernetes/kubernetes.go
index 6288cb394..a8d33bb7e 100644
--- a/middleware/kubernetes/kubernetes.go
+++ b/middleware/kubernetes/kubernetes.go
@@ -4,33 +4,70 @@ package kubernetes
import (
"errors"
"log"
+ "strings"
"time"
"github.com/miekg/coredns/middleware"
- k8sc "github.com/miekg/coredns/middleware/kubernetes/k8sclient"
"github.com/miekg/coredns/middleware/kubernetes/msg"
"github.com/miekg/coredns/middleware/kubernetes/nametemplate"
"github.com/miekg/coredns/middleware/kubernetes/util"
"github.com/miekg/coredns/middleware/proxy"
"github.com/miekg/dns"
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/client/unversioned"
+ "k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
+ clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
+)
+
+const (
+ defaultResyncPeriod = 5 * time.Minute
)
type Kubernetes struct {
Next middleware.Handler
Zones []string
Proxy proxy.Proxy // Proxy for looking up names during the resolution process
- APIConn *k8sc.K8sConnector
+ APIEndpoint string
+ APIConn *dnsController
+ ResyncPeriod time.Duration
NameTemplate *nametemplate.NameTemplate
Namespaces []string
}
+func (g *Kubernetes) StartKubeCache() error {
+ // For a custom api server or running outside a k8s cluster
+ // set URL in env.KUBERNETES_MASTER
+ loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
+ overrides := &clientcmd.ConfigOverrides{}
+ if len(g.APIEndpoint) > 0 {
+ overrides.ClusterInfo = clientcmdapi.Cluster{Server: g.APIEndpoint}
+ }
+ clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, overrides)
+ config, err := clientConfig.ClientConfig()
+ if err != nil {
+ log.Printf("[debug] error connecting to the client: %v", err)
+ return err
+ }
+ kubeClient, err := unversioned.New(config)
+
+ if err != nil {
+ log.Printf("[ERROR] Failed to create kubernetes notification controller: %v", err)
+ return err
+ }
+ g.APIConn = newdnsController(kubeClient, g.ResyncPeriod)
+
+ go g.APIConn.Run()
+
+ return err
+}
+
// getZoneForName returns the zone string that matches the name and a
// list of the DNS labels from name that are within the zone.
// For example, if "coredns.local" is a zone configured for the
// Kubernetes middleware, then getZoneForName("a.b.coredns.local")
// will return ("coredns.local", ["a", "b"]).
-func (g Kubernetes) getZoneForName(name string) (string, []string) {
+func (g *Kubernetes) getZoneForName(name string) (string, []string) {
var zone string
var serviceSegments []string
@@ -51,7 +88,14 @@ func (g Kubernetes) getZoneForName(name string) (string, []string) {
// If exact is true, it will lookup just
// this name. This is used when find matches when completing SRV lookups
// for instance.
-func (g Kubernetes) Records(name string, exact bool) ([]msg.Service, error) {
+func (g *Kubernetes) Records(name string, exact bool) ([]msg.Service, error) {
+ // TODO: refactor this.
+ // Right now GetNamespaceFromSegmentArray does not support PTR queries
+ if strings.HasSuffix(name, arpaSuffix) {
+ ip, _ := extractIP(name)
+ records := g.getServiceRecordForIP(ip, name)
+ return records, nil
+ }
var (
serviceName string
namespace string
@@ -99,6 +143,7 @@ func (g Kubernetes) Records(name string, exact bool) ([]msg.Service, error) {
return nil, nil
}
+ log.Printf("before g.Get(namespace, nsWildcard, serviceName, serviceWildcard): %v %v %v %v", namespace, nsWildcard, serviceName, serviceWildcard)
k8sItems, err := g.Get(namespace, nsWildcard, serviceName, serviceWildcard)
log.Printf("[debug] k8s items: %v\n", k8sItems)
if err != nil {
@@ -115,7 +160,7 @@ func (g Kubernetes) Records(name string, exact bool) ([]msg.Service, error) {
}
// TODO: assemble name from parts found in k8s data based on name template rather than reusing query string
-func (g Kubernetes) getRecordsForServiceItems(serviceItems []k8sc.ServiceItem, values nametemplate.NameValues) []msg.Service {
+func (g *Kubernetes) getRecordsForServiceItems(serviceItems []api.Service, values nametemplate.NameValues) []msg.Service {
var records []msg.Service
for _, item := range serviceItems {
@@ -131,7 +176,7 @@ func (g Kubernetes) getRecordsForServiceItems(serviceItems []k8sc.ServiceItem, v
// Create records for each exposed port...
for _, p := range item.Spec.Ports {
log.Printf("[debug] port: %v\n", p.Port)
- s := msg.Service{Host: clusterIP, Port: p.Port}
+ s := msg.Service{Host: clusterIP, Port: int(p.Port)}
records = append(records, s)
}
}
@@ -141,22 +186,24 @@ func (g Kubernetes) getRecordsForServiceItems(serviceItems []k8sc.ServiceItem, v
}
// Get performs the call to the Kubernetes http API.
-func (g Kubernetes) Get(namespace string, nsWildcard bool, servicename string, serviceWildcard bool) ([]k8sc.ServiceItem, error) {
- serviceList, err := g.APIConn.GetServiceList()
+func (g *Kubernetes) Get(namespace string, nsWildcard bool, servicename string, serviceWildcard bool) ([]api.Service, error) {
+ serviceList := g.APIConn.GetServiceList()
+ /* TODO: Remove?
if err != nil {
log.Printf("[ERROR] Getting service list produced error: %v", err)
return nil, err
}
+ */
- var resultItems []k8sc.ServiceItem
+ var resultItems []api.Service
for _, item := range serviceList.Items {
- if symbolMatches(namespace, item.Metadata.Namespace, nsWildcard) && symbolMatches(servicename, item.Metadata.Name, serviceWildcard) {
+ if symbolMatches(namespace, item.Namespace, nsWildcard) && symbolMatches(servicename, item.Name, serviceWildcard) {
// If namespace has a wildcard, filter results against Corefile namespace list.
// (Namespaces without a wildcard were filtered before the call to this function.)
- if nsWildcard && (len(g.Namespaces) > 0) && (!util.StringInSlice(item.Metadata.Namespace, g.Namespaces)) {
- log.Printf("[debug] Namespace '%v' is not published by Corefile\n", item.Metadata.Namespace)
+ if nsWildcard && (len(g.Namespaces) > 0) && (!util.StringInSlice(item.Namespace, g.Namespaces)) {
+ log.Printf("[debug] Namespace '%v' is not published by Corefile\n", item.Namespace)
continue
}
resultItems = append(resultItems, item)
@@ -179,102 +226,24 @@ func symbolMatches(queryString string, candidateString string, wildcard bool) bo
return result
}
-// TODO: Remove these unused functions. One is related to Ttl calculation
-// Implement Ttl and priority calculation based on service count before
-// removing this code.
-/*
-// splitDNSName separates the name into DNS segments and reverses the segments.
-func (g Kubernetes) splitDNSName(name string) []string {
- l := dns.SplitDomainName(name)
-
- for i, j := 0, len(l)-1; i < j; i, j = i+1, j-1 {
- l[i], l[j] = l[j], l[i]
- }
-
- return l
+// isKubernetesNameError checks if the error is ErrorCodeKeyNotFound from kubernetes.
+func isKubernetesNameError(err error) bool {
+ return false
}
-*/
-// skydns/local/skydns/east/staging/web
-// skydns/local/skydns/west/production/web
-//
-// skydns/local/skydns/*/*/web
-// skydns/local/skydns/*/web
-/*
-// loopNodes recursively loops through the nodes and returns all the values. The nodes' keyname
-// will be match against any wildcards when star is true.
-func (g Kubernetes) loopNodes(ns []*etcdc.Node, nameParts []string, star bool, bx map[msg.Service]bool) (sx []msg.Service, err error) {
- if bx == nil {
- bx = make(map[msg.Service]bool)
- }
-Nodes:
- for _, n := range ns {
- if n.Dir {
- nodes, err := g.loopNodes(n.Nodes, nameParts, star, bx)
- if err != nil {
- return nil, err
- }
- sx = append(sx, nodes...)
- continue
- }
- if star {
- keyParts := strings.Split(n.Key, "/")
- for i, n := range nameParts {
- if i > len(keyParts)-1 {
- // name is longer than key
- continue Nodes
- }
- if n == "*" || n == "any" {
- continue
- }
- if keyParts[i] != n {
- continue Nodes
- }
- }
- }
- serv := new(msg.Service)
- if err := json.Unmarshal([]byte(n.Value), serv); err != nil {
- return nil, err
- }
- b := msg.Service{Host: serv.Host, Port: serv.Port, Priority: serv.Priority, Weight: serv.Weight, Text: serv.Text, Key: n.Key}
- if _, ok := bx[b]; ok {
- continue
- }
- bx[b] = true
- serv.Key = n.Key
- serv.Ttl = g.Ttl(n, serv)
- if serv.Priority == 0 {
- serv.Priority = priority
- }
- sx = append(sx, *serv)
+func (g *Kubernetes) getServiceRecordForIP(ip, name string) []msg.Service {
+ svcList, err := g.APIConn.svcLister.List()
+ if err != nil {
+ return nil
}
- return sx, nil
-}
-// Ttl returns the smaller of the kubernetes TTL and the service's
-// TTL. If neither of these are set (have a zero value), a default is used.
-func (g Kubernetes) Ttl(node *etcdc.Node, serv *msg.Service) uint32 {
- kubernetesTtl := uint32(node.TTL)
-
- if kubernetesTtl == 0 && serv.Ttl == 0 {
- return ttl
- }
- if kubernetesTtl == 0 {
- return serv.Ttl
- }
- if serv.Ttl == 0 {
- return kubernetesTtl
- }
- if kubernetesTtl < serv.Ttl {
- return kubernetesTtl
+ for _, service := range svcList.Items {
+ if service.Spec.ClusterIP == ip {
+ return []msg.Service{msg.Service{Host: ip}}
+ }
}
- return serv.Ttl
-}
-*/
-// kubernetesNameError checks if the error is ErrorCodeKeyNotFound from kubernetes.
-func isKubernetesNameError(err error) bool {
- return false
+ return nil
}
const (
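
StartKubeCache builds the client configuration (honouring KUBERNETES_MASTER or the configured APIEndpoint) and starts the informer-backed controller in the background. The sketch below wires this up by hand; the endpoint URL, zone, and resync period are made-up example values, and inside CoreDNS this wiring is normally done by the middleware setup code rather than by user programs.

```go
package main

// Sketch only: starts the kubernetes middleware cache by hand. The endpoint
// URL, zone, and resync period are made-up example values.

import (
	"log"
	"time"

	"github.com/miekg/coredns/middleware/kubernetes"
)

func main() {
	k := kubernetes.Kubernetes{
		Zones:        []string{"coredns.local."},
		APIEndpoint:  "http://127.0.0.1:8080", // or leave empty and set KUBERNETES_MASTER
		ResyncPeriod: 5 * time.Minute,
	}
	if err := k.StartKubeCache(); err != nil {
		log.Fatalf("failed to start kubernetes cache: %v", err)
	}
	// From here on, lookups read from the informer-backed stores in k.APIConn
	// instead of hitting the Kubernetes API once per DNS query.
}
```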
diff --git a/middleware/kubernetes/lookup.go b/middleware/kubernetes/lookup.go
index b490d6a4b..0096e1fdb 100644
--- a/middleware/kubernetes/lookup.go
+++ b/middleware/kubernetes/lookup.go
@@ -4,6 +4,7 @@ import (
"fmt"
"math"
"net"
+ "strings"
"time"
"github.com/miekg/coredns/middleware"
@@ -12,6 +13,11 @@ import (
"github.com/miekg/dns"
)
+const (
+ // arpaSuffix is the standard suffix for PTR IP reverse lookups.
+ arpaSuffix = ".in-addr.arpa."
+)
+
func (k Kubernetes) records(state middleware.State, exact bool) ([]msg.Service, error) {
services, err := k.Records(state.Name(), exact)
if err != nil {
@@ -64,13 +70,13 @@ func (k Kubernetes) A(zone string, state middleware.State, previousRecords []dns
// We should already have found it
continue
}
- m1, e1 := k.Proxy.Lookup(state, target, state.QType())
- if e1 != nil {
+ mes, err := k.Proxy.Lookup(state, target, state.QType())
+ if err != nil {
continue
}
- // Len(m1.Answer) > 0 here is well?
+ // Len(mes.Answer) > 0 here as well?
records = append(records, newRecord)
- records = append(records, m1.Answer...)
+ records = append(records, mes.Answer...)
continue
case ip.To4() != nil:
records = append(records, serv.NewA(state.QName(), ip.To4()))
@@ -285,7 +291,33 @@ func (k Kubernetes) SOA(zone string, state middleware.State) *dns.SOA {
}
}
-// TODO(miek): DNSKEY and friends... intercepted by the DNSSEC middleware?
+func (k Kubernetes) PTR(zone string, state middleware.State) ([]dns.RR, error) {
+ reverseIP, ok := extractIP(state.Name())
+ if !ok {
+ return nil, fmt.Errorf("does not support reverse lookup for %s", state.QName())
+ }
+
+ var records []dns.RR
+ services, err := k.records(state, false)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, serv := range services {
+ ip := net.ParseIP(serv.Host)
+ if reverseIP != serv.Host {
+ continue
+ }
+ switch {
+ case ip.To4() != nil:
+ records = append(records, serv.NewPTR(state.QName(), ip.To4().String()))
+ break
+ case ip.To4() == nil:
+ // nodata?
+ }
+ }
+ return records, nil
+}
func isDuplicateCNAME(r *dns.CNAME, records []dns.RR) bool {
for _, rec := range records {
@@ -300,6 +332,27 @@ func isDuplicateCNAME(r *dns.CNAME, records []dns.RR) bool {
func copyState(state middleware.State, target string, typ uint16) middleware.State {
state1 := middleware.State{W: state.W, Req: state.Req.Copy()}
- state1.Req.Question[0] = dns.Question{dns.Fqdn(target), dns.ClassINET, typ}
+ state1.Req.Question[0] = dns.Question{Name: dns.Fqdn(target), Qtype: typ, Qclass: dns.ClassINET}
return state1
}
+
+// extractIP turns a standard PTR reverse record lookup name
+// into an IP address
+func extractIP(reverseName string) (string, bool) {
+ if !strings.HasSuffix(reverseName, arpaSuffix) {
+ return "", false
+ }
+ search := strings.TrimSuffix(reverseName, arpaSuffix)
+
+ // reverse the segments and then combine them
+ segments := reverseArray(strings.Split(search, "."))
+ return strings.Join(segments, "."), true
+}
+
+func reverseArray(arr []string) []string {
+ for i := 0; i < len(arr)/2; i++ {
+ j := len(arr) - i - 1
+ arr[i], arr[j] = arr[j], arr[i]
+ }
+ return arr
+}
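
extractIP and reverseArray convert a reverse-lookup name back into a dotted IP by trimming the arpa suffix and reversing the labels. A hypothetical table test (not part of this change) of the expected behaviour:

```go
package kubernetes

// Sketch only: a hypothetical table test for extractIP as defined above.

import "testing"

func TestExtractIPSketch(t *testing.T) {
	tests := []struct {
		in string
		ip string
		ok bool
	}{
		// labels are reversed back into the original dotted IP
		{"100.0.3.10.in-addr.arpa.", "10.3.0.100", true},
		// non-reverse names are rejected
		{"myservice.demo.svc.coredns.local.", "", false},
	}
	for _, tc := range tests {
		ip, ok := extractIP(tc.in)
		if ip != tc.ip || ok != tc.ok {
			t.Errorf("extractIP(%q) = (%q, %v), want (%q, %v)", tc.in, ip, ok, tc.ip, tc.ok)
		}
	}
}
```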
diff --git a/middleware/kubernetes/msg/service.go b/middleware/kubernetes/msg/service.go
index 588e7b33c..24af6b4fd 100644
--- a/middleware/kubernetes/msg/service.go
+++ b/middleware/kubernetes/msg/service.go
@@ -77,6 +77,11 @@ func (s *Service) NewNS(name string) *dns.NS {
return &dns.NS{Hdr: dns.RR_Header{Name: name, Rrtype: dns.TypeNS, Class: dns.ClassINET, Ttl: s.Ttl}, Ns: host}
}
+// NewPTR returns a new PTR record based on the Service.
+func (s *Service) NewPTR(name string, target string) *dns.PTR {
+ return &dns.PTR{Hdr: dns.RR_Header{Name: name, Rrtype: dns.TypePTR, Class: dns.ClassINET, Ttl: s.Ttl}, Ptr: dns.Fqdn(target)}
+}
+
// Group checks the services in sx, it looks for a Group attribute on the shortest
// keys. If there are multiple shortest keys *and* the group attribute disagrees (and
// is not empty), we don't consider it a group.
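
NewPTR mirrors the other record constructors on msg.Service. A short usage sketch follows; the IP, TTL, and target name are made-up example values, and the target here is a service DNS name, whereas the handler and lookup code above currently pass the raw IP as the PTR target.

```go
package main

// Sketch only: builds a PTR answer with the new Service.NewPTR helper. The
// IP, TTL, and target name are made-up example values.

import (
	"fmt"

	"github.com/miekg/coredns/middleware/kubernetes/msg"
)

func main() {
	s := msg.Service{Host: "10.3.0.100", Ttl: 30}
	rr := s.NewPTR("100.0.3.10.in-addr.arpa.", "myservice.demo.svc.coredns.local")
	fmt.Println(rr.String())
	// 100.0.3.10.in-addr.arpa.	30	IN	PTR	myservice.demo.svc.coredns.local.
}
```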
diff --git a/middleware/kubernetes/test/20_setup_k8s_services.sh b/middleware/kubernetes/test/20_setup_k8s_services.sh
index d5c221a84..1eb993543 100755
--- a/middleware/kubernetes/test/20_setup_k8s_services.sh
+++ b/middleware/kubernetes/test/20_setup_k8s_services.sh
@@ -69,7 +69,7 @@ run_and_expose_service() {
wait_until_k8s_ready
-NAMESPACES="demo test"
+NAMESPACES="demo poddemo test"
create_namespaces
echo ""
diff --git a/middleware/kubernetes/util/util.go b/middleware/kubernetes/util/util.go
index 259eaf596..89cc2b592 100644
--- a/middleware/kubernetes/util/util.go
+++ b/middleware/kubernetes/util/util.go
@@ -3,6 +3,9 @@ package util
import (
"strings"
+
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/client/cache"
)
// StringInSlice check whether string a is a member of slice.
@@ -24,3 +27,16 @@ const (
WildcardStar = "*"
WildcardAny = "any"
)
+
+// StoreToNamespaceLister makes a Store that lists Namespaces.
+type StoreToNamespaceLister struct {
+ cache.Store
+}
+
+// List lists all Namespaces in the store.
+func (s *StoreToNamespaceLister) List() (ns api.NamespaceList, err error) {
+ for _, m := range s.Store.List() {
+ ns.Items = append(ns.Items, *(m.(*api.Namespace)))
+ }
+ return ns, nil
+}
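
util.StoreToNamespaceLister simply exposes a cache store of Namespace objects as an api.NamespaceList. A small usage sketch; here the store is filled by hand, whereas in the middleware it is populated by the namespace informer in controller.go.

```go
package main

// Sketch only: lists namespaces through the new util.StoreToNamespaceLister.
// The namespace "demo" is an example value added by hand.

import (
	"fmt"

	"github.com/miekg/coredns/middleware/kubernetes/util"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
)

func main() {
	ns := util.StoreToNamespaceLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)}
	ns.Add(&api.Namespace{ObjectMeta: api.ObjectMeta{Name: "demo"}})

	list, err := ns.List()
	if err != nil {
		panic(err)
	}
	for _, item := range list.Items {
		fmt.Println(item.Name) // demo
	}
}
```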