-rw-r--r--   .gitignore                                             1
-rw-r--r--   Makefile                                               7
-rw-r--r--   core/https/crypto.go                                   4
-rw-r--r--   core/https/crypto_test.go                              6
-rw-r--r--   middleware/kubernetes/README.md                       10
-rw-r--r--   middleware/kubernetes/handler.go                       1
-rw-r--r--   middleware/kubernetes/k8sclient/k8sclient.go          24
-rw-r--r--   middleware/kubernetes/kubernetes.go                  105
-rwxr-xr-x   middleware/kubernetes/test/00_run_k8s.sh              39
-rwxr-xr-x   middleware/kubernetes/test/10_setup_kubectl.sh        18
-rwxr-xr-x   middleware/kubernetes/test/15_run_skydns.sh           39
-rwxr-xr-x   middleware/kubernetes/test/20_setup_k8s_services.sh   80
-rw-r--r--   middleware/kubernetes/test/README.md                  35
-rwxr-xr-x   middleware/kubernetes/test/kill_all_containers.sh      5
-rw-r--r--   middleware/kubernetes/util/util.go                    14
-rw-r--r--   middleware/kubernetes/util/util_test.go               22
-rw-r--r--   test/kubernetes_test.go                              182
17 files changed, 526 insertions, 66 deletions
diff --git a/.gitignore b/.gitignore
index fe121cbce..699137222 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,3 +2,4 @@ query.log
Corefile
*.swp
coredns
+kubectl
diff --git a/Makefile b/Makefile
index faedc63c8..482b557b3 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
BUILD_VERBOSE := -v
TEST_VERBOSE :=
-#TEST_VERBOSE := -v
+TEST_VERBOSE := -v
all:
go build $(BUILD_VERBOSE)
@@ -20,6 +20,11 @@ deps:
test:
go test $(TEST_VERBOSE) ./...
+.PHONY: testk8s
+testk8s:
+# go test $(TEST_VERBOSE) -tags=k8sIntegration ./...
+ go test $(TEST_VERBOSE) -tags=k8sIntegration -run 'TestK8sIntegration' ./test
+
.PHONY: clean
clean:
go clean
diff --git a/core/https/crypto.go b/core/https/crypto.go
index bc0ff6373..7971bda36 100644
--- a/core/https/crypto.go
+++ b/core/https/crypto.go
@@ -18,7 +18,7 @@ func loadPrivateKey(file string) (crypto.PrivateKey, error) {
return nil, err
}
keyBlock, _ := pem.Decode(keyBytes)
-
+
switch keyBlock.Type {
case "RSA PRIVATE KEY":
return x509.ParsePKCS1PrivateKey(keyBlock.Bytes)
@@ -45,7 +45,7 @@ func savePrivateKey(key crypto.PrivateKey, file string) error {
pemType = "RSA"
keyBytes = x509.MarshalPKCS1PrivateKey(key)
}
-
+
pemKey := pem.Block{Type: pemType + " PRIVATE KEY", Bytes: keyBytes}
keyOut, err := os.Create(file)
if err != nil {
diff --git a/core/https/crypto_test.go b/core/https/crypto_test.go
index c1f32b27d..07d2af5c7 100644
--- a/core/https/crypto_test.go
+++ b/core/https/crypto_test.go
@@ -88,9 +88,9 @@ func TestSaveAndLoadECCPrivateKey(t *testing.T) {
}
// verify loaded key is correct
- if !PrivateKeysSame(privateKey, loadedKey) {
- t.Error("Expected key bytes to be the same, but they weren't")
- }
+ if !PrivateKeysSame(privateKey, loadedKey) {
+ t.Error("Expected key bytes to be the same, but they weren't")
+ }
}
// PrivateKeysSame compares the bytes of a and b and returns true if they are the same.
diff --git a/middleware/kubernetes/README.md b/middleware/kubernetes/README.md
index c5ba32f72..2f2f7341d 100644
--- a/middleware/kubernetes/README.md
+++ b/middleware/kubernetes/README.md
@@ -7,6 +7,7 @@ are constructed as "myservice.mynamespace.coredns.local" where:
* "mynamespace" is the k8s namespace for the service, and
* "coredns.local" is the zone configured for `kubernetes`.
+The record name format can be changed by specifying a name template in the Corefile.
## Syntax
@@ -36,6 +37,10 @@ This is the default kubernetes setup, with everything specified in full:
kubernetes coredns.local {
# Use url for k8s API endpoint
endpoint http://localhost:8080
+ # Assemble k8s record names with the template
+ template {service}.{namespace}.{zone}
+ # Only expose the k8s namespace "demo"
+ namespaces demo
}
# cache 160 coredns.local
}
@@ -247,7 +252,7 @@ return the IP addresses for all services with "nginx" in the service name.
TBD:
* How does this relate to the k8s load-balancer configuration?
-* Do wildcards search across namespaces?
+* Do wildcards search across namespaces? (Yes)
* Initial implementation assumes that a namespace maps to the first DNS label
below the zone managed by the kubernetes middleware. This assumption may
need to be revised.
@@ -344,4 +349,5 @@ TBD:
and A-record queries. Automate testing with cache in place.
* Automate CoreDNS performance tests. Initially for zone files, and for
pre-loaded k8s API cache.
- * Automate integration testing with kubernetes.
+ * Automate integration testing with kubernetes. (k8s launch and service start-up
+   automation is in middleware/kubernetes/test)
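
As an aside on the name template introduced in this README change: the sketch below is a minimal, self-contained Go illustration (it is not the middleware's actual `nametemplate` package; the function names `expandTemplate` and `fieldsFromQuery` are hypothetical) of how a `{service}.{namespace}.{zone}` template can be expanded into a record name, and how the leading labels of a query can be mapped back onto template fields. With the default template, service `mynginx` in namespace `demo` under zone `coredns.local` yields `mynginx.demo.coredns.local`.

~~~go
package main

import (
	"fmt"
	"strings"
)

// expandTemplate fills the {service}, {namespace} and {zone} placeholders of a
// record-name template such as "{service}.{namespace}.{zone}".
func expandTemplate(template, service, namespace, zone string) string {
	r := strings.NewReplacer(
		"{service}", service,
		"{namespace}", namespace,
		"{zone}", zone,
	)
	return r.Replace(template)
}

// fieldsFromQuery maps the leading labels of a query name onto the placeholder
// names of the template, e.g. "webserver.demo" against "{service}.{namespace}"
// yields map[service:webserver namespace:demo].
func fieldsFromQuery(template, query string) map[string]string {
	fields := map[string]string{}
	tparts := strings.Split(template, ".")
	qparts := strings.Split(query, ".")
	for i, t := range tparts {
		if i >= len(qparts) {
			break
		}
		fields[strings.Trim(t, "{}")] = qparts[i]
	}
	return fields
}

func main() {
	tmpl := "{service}.{namespace}.{zone}"
	fmt.Println(expandTemplate(tmpl, "mynginx", "demo", "coredns.local"))
	// mynginx.demo.coredns.local
	fmt.Println(fieldsFromQuery("{service}.{namespace}", "webserver.demo"))
	// map[namespace:demo service:webserver]
}
~~~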
diff --git a/middleware/kubernetes/handler.go b/middleware/kubernetes/handler.go
index 168b65508..49d1c1573 100644
--- a/middleware/kubernetes/handler.go
+++ b/middleware/kubernetes/handler.go
@@ -10,7 +10,6 @@ import (
)
func (k Kubernetes) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {
-
fmt.Printf("[debug] here entering ServeDNS: ctx:%v dnsmsg:%v\n", ctx, r)
state := middleware.State{W: w, Req: r}
diff --git a/middleware/kubernetes/k8sclient/k8sclient.go b/middleware/kubernetes/k8sclient/k8sclient.go
index 95300f3b9..cb1d0fe4a 100644
--- a/middleware/kubernetes/k8sclient/k8sclient.go
+++ b/middleware/kubernetes/k8sclient/k8sclient.go
@@ -117,30 +117,6 @@ func (c *K8sConnector) GetServicesByNamespace() (map[string][]ServiceItem, error
return items, nil
}
-// GetServiceItemsInNamespace returns the ServiceItems that match
-// servicename in the namespace
-func (c *K8sConnector) GetServiceItemsInNamespace(namespace string, servicename string) ([]*ServiceItem, error) {
-
- itemMap, err := c.GetServicesByNamespace()
-
- if err != nil {
- fmt.Printf("[ERROR] Getting service list produced error: %v", err)
- return nil, err
- }
-
- // TODO: Handle case where namespace == nil
-
- var serviceItems []*ServiceItem
-
- for _, x := range itemMap[namespace] {
- if x.Metadata.Name == servicename {
- serviceItems = append(serviceItems, &x)
- }
- }
-
- return serviceItems, nil
-}
-
func NewK8sConnector(baseURL string) *K8sConnector {
k := new(K8sConnector)
diff --git a/middleware/kubernetes/kubernetes.go b/middleware/kubernetes/kubernetes.go
index d6d93f809..f9a0712fe 100644
--- a/middleware/kubernetes/kubernetes.go
+++ b/middleware/kubernetes/kubernetes.go
@@ -62,7 +62,7 @@ func (g Kubernetes) Records(name string, exact bool) ([]msg.Service, error) {
typeName string
)
- fmt.Println("enter Records('", name, "', ", exact, ")")
+ fmt.Println("[debug] enter Records('", name, "', ", exact, ")")
zone, serviceSegments := g.getZoneForName(name)
/*
@@ -83,6 +83,18 @@ func (g Kubernetes) Records(name string, exact bool) ([]msg.Service, error) {
serviceName = g.NameTemplate.GetServiceFromSegmentArray(serviceSegments)
typeName = g.NameTemplate.GetTypeFromSegmentArray(serviceSegments)
+ if namespace == "" {
+ err := errors.New("Parsing query string did not produce a namespace value. Assuming wildcard namespace.")
+ fmt.Printf("[WARN] %v\n", err)
+ namespace = util.WildcardStar
+ }
+
+ if serviceName == "" {
+ err := errors.New("Parsing query string did not produce a serviceName value. Assuming wildcard serviceName.")
+ fmt.Printf("[WARN] %v\n", err)
+ serviceName = util.WildcardStar
+ }
+
fmt.Println("[debug] exact: ", exact)
fmt.Println("[debug] zone: ", zone)
fmt.Println("[debug] servicename: ", serviceName)
@@ -90,21 +102,18 @@ func (g Kubernetes) Records(name string, exact bool) ([]msg.Service, error) {
fmt.Println("[debug] typeName: ", typeName)
fmt.Println("[debug] APIconn: ", g.APIConn)
- // TODO: Implement wildcard support to allow blank namespace value
- if namespace == "" {
- err := errors.New("Parsing query string did not produce a namespace value")
- fmt.Printf("[ERROR] %v\n", err)
- return nil, err
- }
+ nsWildcard := util.SymbolContainsWildcard(namespace)
+ serviceWildcard := util.SymbolContainsWildcard(serviceName)
- // Abort if the namespace is not published per CoreFile
- if g.Namespaces != nil && !util.StringInSlice(namespace, *g.Namespaces) {
+ // Abort if the namespace does not contain a wildcard, and namespace is not published per CoreFile
+ // Case where namespace contains a wildcard is handled in Get(...) method.
+ if (!nsWildcard) && (g.Namespaces != nil && !util.StringInSlice(namespace, *g.Namespaces)) {
+ fmt.Printf("[debug] Namespace '%v' is not published by Corefile\n", namespace)
return nil, nil
}
- k8sItems, err := g.APIConn.GetServiceItemsInNamespace(namespace, serviceName)
+ k8sItems, err := g.Get(namespace, nsWildcard, serviceName, serviceWildcard)
fmt.Println("[debug] k8s items:", k8sItems)
-
if err != nil {
fmt.Printf("[ERROR] Got error while looking up ServiceItems. Error is: %v\n", err)
return nil, err
@@ -114,29 +123,27 @@ func (g Kubernetes) Records(name string, exact bool) ([]msg.Service, error) {
return nil, nil
}
- // test := g.NameTemplate.GetRecordNameFromNameValues(nametemplate.NameValues{ServiceName: serviceName, TypeName: typeName, Namespace: namespace, Zone: zone})
- // fmt.Printf("[debug] got recordname %v\n", test)
-
- records := g.getRecordsForServiceItems(k8sItems, name)
-
+ records := g.getRecordsForServiceItems(k8sItems, nametemplate.NameValues{TypeName: typeName, ServiceName: serviceName, Namespace: namespace, Zone: zone})
return records, nil
}
// TODO: assemble name from parts found in k8s data based on name template rather than reusing query string
-func (g Kubernetes) getRecordsForServiceItems(serviceItems []*k8sc.ServiceItem, name string) []msg.Service {
+func (g Kubernetes) getRecordsForServiceItems(serviceItems []k8sc.ServiceItem, values nametemplate.NameValues) []msg.Service {
var records []msg.Service
for _, item := range serviceItems {
- fmt.Println("[debug] clusterIP:", item.Spec.ClusterIP)
- for _, p := range item.Spec.Ports {
- fmt.Println("[debug] port:", p.Port)
- }
-
clusterIP := item.Spec.ClusterIP
+ fmt.Println("[debug] clusterIP:", clusterIP)
+
+ // Create records by constructing record name from template...
+ //values.Namespace = item.Metadata.Namespace
+ //values.ServiceName = item.Metadata.Name
+ //s := msg.Service{Host: g.NameTemplate.GetRecordNameFromNameValues(values)}
+ //records = append(records, s)
- s := msg.Service{Host: name}
- records = append(records, s)
+ // Create records for each exposed port...
for _, p := range item.Spec.Ports {
+ fmt.Println("[debug] port:", p.Port)
s := msg.Service{Host: clusterIP, Port: p.Port}
records = append(records, s)
}
@@ -146,17 +153,50 @@ func (g Kubernetes) getRecordsForServiceItems(serviceItems []*k8sc.ServiceItem,
return records
}
-/*
// Get performs the call to the Kubernetes http API.
-func (g Kubernetes) Get(path string, recursive bool) (bool, error) {
+func (g Kubernetes) Get(namespace string, nsWildcard bool, servicename string, serviceWildcard bool) ([]k8sc.ServiceItem, error) {
+ serviceList, err := g.APIConn.GetServiceList()
- fmt.Println("[debug] in Get path: ", path)
- fmt.Println("[debug] in Get recursive: ", recursive)
+ if err != nil {
+ fmt.Printf("[ERROR] Getting service list produced error: %v", err)
+ return nil, err
+ }
- return false, nil
+ var resultItems []k8sc.ServiceItem
+
+ for _, item := range serviceList.Items {
+ if symbolMatches(namespace, item.Metadata.Namespace, nsWildcard) && symbolMatches(servicename, item.Metadata.Name, serviceWildcard) {
+ // If namespace has a wildcard, filter results against Corefile namespace list.
+ // (Namespaces without a wildcard were filtered before the call to this function.)
+ if nsWildcard && (g.Namespaces != nil && !util.StringInSlice(item.Metadata.Namespace, *g.Namespaces)) {
+ fmt.Printf("[debug] Namespace '%v' is not published by Corefile\n", item.Metadata.Namespace)
+ continue
+ }
+ resultItems = append(resultItems, item)
+ }
+ }
+
+ return resultItems, nil
}
-*/
+func symbolMatches(queryString string, candidateString string, wildcard bool) bool {
+ result := false
+ switch {
+ case !wildcard:
+ result = (queryString == candidateString)
+ case queryString == util.WildcardStar:
+ result = true
+ case queryString == util.WildcardAny:
+ result = true
+ }
+ return result
+}
+
+// TODO: Remove these unused functions. One is related to Ttl calculation
+// Implement Ttl and priority calculation based on service count before
+// removing this code.
+/*
+// splitDNSName separates the name into DNS segments and reverses the segments.
func (g Kubernetes) splitDNSName(name string) []string {
l := dns.SplitDomainName(name)
@@ -166,16 +206,15 @@ func (g Kubernetes) splitDNSName(name string) []string {
return l
}
-
+*/
// skydns/local/skydns/east/staging/web
// skydns/local/skydns/west/production/web
//
// skydns/local/skydns/*/*/web
// skydns/local/skydns/*/web
-
+/*
// loopNodes recursively loops through the nodes and returns all the values. The nodes' keyname
// will be match against any wildcards when star is true.
-/*
func (g Kubernetes) loopNodes(ns []*etcdc.Node, nameParts []string, star bool, bx map[msg.Service]bool) (sx []msg.Service, err error) {
if bx == nil {
bx = make(map[msg.Service]bool)
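
To make the wildcard handling in `Records` and `Get` above concrete, the sketch below mirrors the `symbolMatches` rule and the published-namespace filter from this diff against hypothetical data (the `service` struct and sample entries are illustrative stand-ins, not the `k8sclient` types). Note that only the literal symbols `*` and `any` match everything; a partial pattern such as `my*space` is flagged as a wildcard by `SymbolContainsWildcard` but matches no concrete name.

~~~go
package main

import "fmt"

const (
	wildcardStar = "*"
	wildcardAny  = "any"
)

// service stands in for the k8sclient ServiceItem fields used here.
type service struct{ Name, Namespace string }

// symbolMatches mirrors the rule in this diff: exact comparison unless the
// query symbol is a wildcard, in which case only the literal "*" or "any" match.
func symbolMatches(query, candidate string, wildcard bool) bool {
	switch {
	case !wildcard:
		return query == candidate
	case query == wildcardStar, query == wildcardAny:
		return true
	}
	return false
}

func isWildcard(s string) bool { return s == wildcardStar || s == wildcardAny }

func contains(list []string, s string) bool {
	for _, v := range list {
		if v == s {
			return true
		}
	}
	return false
}

func main() {
	// Hypothetical cluster state; only "demo" is published in the Corefile.
	services := []service{
		{"mynginx", "demo"}, {"webserver", "demo"},
		{"mynginx", "test"}, {"webserver", "test"},
	}
	published := []string{"demo"}

	query := func(svc, ns string) []service {
		nsWild, svcWild := isWildcard(ns), isWildcard(svc)
		var out []service
		for _, s := range services {
			if !symbolMatches(ns, s.Namespace, nsWild) || !symbolMatches(svc, s.Name, svcWild) {
				continue
			}
			// A wildcard namespace is still filtered against the published list.
			if nsWild && !contains(published, s.Namespace) {
				continue
			}
			out = append(out, s)
		}
		return out
	}

	fmt.Println(query("*", "demo"))      // [{mynginx demo} {webserver demo}]
	fmt.Println(query("mynginx", "any")) // [{mynginx demo}]; "test" is not published
}
~~~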
diff --git a/middleware/kubernetes/test/00_run_k8s.sh b/middleware/kubernetes/test/00_run_k8s.sh
new file mode 100755
index 000000000..3753cbd8e
--- /dev/null
+++ b/middleware/kubernetes/test/00_run_k8s.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+# Based on instructions at: http://kubernetes.io/docs/getting-started-guides/docker/
+
+#K8S_VERSION=$(curl -sS https://storage.googleapis.com/kubernetes-release/release/latest.txt)
+K8S_VERSION="v1.2.4"
+
+ARCH="amd64"
+
+
+export K8S_VERSION
+export ARCH
+
+#RUN_SKYDNS="yes"
+RUN_SKYDNS="no"
+
+if [ "${RUN_SKYDNS}" = "yes" ]; then
+ DNS_ARGUMENTS="--cluster-dns=10.0.0.10 --cluster-domain=cluster.local"
+else
+ DNS_ARGUMENTS=""
+fi
+
+docker run -d \
+ --volume=/:/rootfs:ro \
+ --volume=/sys:/sys:ro \
+ --volume=/var/lib/docker/:/var/lib/docker:rw \
+ --volume=/var/lib/kubelet/:/var/lib/kubelet:rw \
+ --volume=/var/run:/var/run:rw \
+ --net=host \
+ --pid=host \
+ --privileged \
+ gcr.io/google_containers/hyperkube-${ARCH}:${K8S_VERSION} \
+ /hyperkube kubelet \
+ --containerized \
+ --hostname-override=127.0.0.1 \
+ --api-servers=http://localhost:8080 \
+ --config=/etc/kubernetes/manifests \
+ ${DNS_ARGUMENTS} \
+ --allow-privileged --v=2
diff --git a/middleware/kubernetes/test/10_setup_kubectl.sh b/middleware/kubernetes/test/10_setup_kubectl.sh
new file mode 100755
index 000000000..c47b779a3
--- /dev/null
+++ b/middleware/kubernetes/test/10_setup_kubectl.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+PWD=`pwd`
+BASEDIR=`realpath $(dirname ${0})`
+
+cd ${BASEDIR}
+if [ ! -e kubectl ]; then
+ curl -O http://storage.googleapis.com/kubernetes-release/release/v1.2.4/bin/linux/amd64/kubectl
+ chmod u+x kubectl
+fi
+
+${BASEDIR}/kubectl config set-cluster test-doc --server=http://localhost:8080
+${BASEDIR}/kubectl config set-context test-doc --cluster=test-doc
+${BASEDIR}/kubectl config use-context test-doc
+
+cd ${PWD}
+
+alias kubectl="${BASEDIR}/kubectl"
diff --git a/middleware/kubernetes/test/15_run_skydns.sh b/middleware/kubernetes/test/15_run_skydns.sh
new file mode 100755
index 000000000..b88cf8f9b
--- /dev/null
+++ b/middleware/kubernetes/test/15_run_skydns.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+# Running skydns based on instructions at: https://testdatamanagement.wordpress.com/2015/09/01/running-kubernetes-in-docker-with-dns-on-a-single-node/
+
+KUBECTL='./kubectl'
+
+#RUN_SKYDNS="yes"
+RUN_SKYDNS="no"
+
+wait_until_k8s_ready() {
+ # Wait until kubernetes is up and fully responsive
+ while :
+ do
+ ${KUBECTL} get nodes 2>/dev/null | grep -q '127.0.0.1'
+ if [ "${?}" = "0" ]; then
+ break
+ else
+ echo "sleeping for 5 seconds"
+ sleep 5
+ fi
+ done
+ echo "kubernetes nodes:"
+ ${KUBECTL} get nodes
+}
+
+
+if [ "${RUN_SKYDNS}" = "yes" ]; then
+ wait_until_k8s_ready
+
+ echo "Launch kube2sky..."
+ docker run -d --net=host gcr.io/google_containers/kube2sky:1.11 --kube_master_url=http://127.0.0.1:8080 --domain=cluster.local
+
+ echo ""
+
+ echo "Launch SkyDNS..."
+ docker run -d --net=host gcr.io/google_containers/skydns:2015-03-11-001 --machines=http://localhost:4001 --addr=0.0.0.0:53 --domain=cluster.local
+else
+ true
+fi
diff --git a/middleware/kubernetes/test/20_setup_k8s_services.sh b/middleware/kubernetes/test/20_setup_k8s_services.sh
new file mode 100755
index 000000000..0d067cf26
--- /dev/null
+++ b/middleware/kubernetes/test/20_setup_k8s_services.sh
@@ -0,0 +1,80 @@
+#!/bin/bash
+
+KUBECTL='./kubectl'
+
+wait_until_k8s_ready() {
+ # Wait until kubernetes is up and fully responsive
+ while :
+ do
+ ${KUBECTL} get nodes 2>/dev/null | grep -q '127.0.0.1'
+ if [ "${?}" = "0" ]; then
+ break
+ else
+ echo "sleeping for 5 seconds"
+ sleep 5
+ fi
+ done
+ echo "kubernetes nodes:"
+ ${KUBECTL} get nodes
+}
+
+create_namespaces() {
+ for n in ${NAMESPACES};
+ do
+ echo "Creating namespace: ${n}"
+ ${KUBECTL} get namespaces --no-headers 2>/dev/null | grep -q ${n}
+ if [ "${?}" != "0" ]; then
+ ${KUBECTL} create namespace ${n}
+ fi
+ done
+
+ echo "kubernetes namespaces:"
+ ${KUBECTL} get namespaces
+}
+
+# run_and_expose_service <servicename> <namespace> <image> <port>
+run_and_expose_service() {
+
+ if [ "${#}" != "4" ]; then
+        return 1
+ fi
+
+ service="${1}"
+ namespace="${2}"
+ image="${3}"
+ port="${4}"
+
+    echo "    starting service '${service}' in namespace '${namespace}'"
+
+ ${KUBECTL} get deployment --namespace=${namespace} --no-headers 2>/dev/null | grep -q ${service}
+ if [ "${?}" != "0" ]; then
+ ${KUBECTL} run ${service} --namespace=${namespace} --image=${image}
+ else
+ echo "warn: service '${service}' already running in namespace '${namespace}'"
+ fi
+
+ ${KUBECTL} get service --namespace=${namespace} --no-headers 2>/dev/null | grep -q ${service}
+ if [ "${?}" != "0" ]; then
+ ${KUBECTL} expose deployment ${service} --namespace=${namespace} --port=${port}
+ else
+ echo "warn: service '${service}' already exposed in namespace '${namespace}'"
+ fi
+}
+
+
+wait_until_k8s_ready
+
+NAMESPACES="demo test"
+create_namespaces
+
+echo ""
+echo "Starting services:"
+
+run_and_expose_service mynginx demo nginx 80
+run_and_expose_service webserver demo nginx 80
+run_and_expose_service mynginx test nginx 80
+run_and_expose_service webserver test nginx 80
+
+echo ""
+echo "Services exposed:"
+${KUBECTL} get services --all-namespaces
diff --git a/middleware/kubernetes/test/README.md b/middleware/kubernetes/test/README.md
new file mode 100644
index 000000000..eea1bf7d3
--- /dev/null
+++ b/middleware/kubernetes/test/README.md
@@ -0,0 +1,35 @@
+## Test scripts to automate kubernetes startup
+
+Requirements:
+ docker
+ curl
+
+The scripts in this directory startup kubernetes with docker as the container runtime.
+After starting kubernetes, a couple of kubernetes services are started to allow automatic
+testing of CoreDNS with kubernetes.
+
+To use, run the scripts as:
+
+~~~
+$ ./00_run_k8s.sh && ./10_setup_kubectl.sh && ./20_setup_k8s_services.sh
+~~~
+
+After running the above scripts, kubernetes will be running on the localhost with the following services
+exposed:
+
+~~~
+NAMESPACE NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+default kubernetes 10.0.0.1 <none> 443/TCP 48m
+demo mynginx 10.0.0.168 <none> 80/TCP 9m
+demo webserver 10.0.0.28 <none> 80/TCP 2m
+test mynginx 10.0.0.4 <none> 80/TCP 2m
+test webserver 10.0.0.39 <none> 80/TCP 2m
+~~~
+
+
+Kubernetes and all running containers can be unceremoniously stopped by
+running the `kill_all_containers.sh` script.
+
+~~~
+$ ./kill_all_containers.sh
+~~~
diff --git a/middleware/kubernetes/test/kill_all_containers.sh b/middleware/kubernetes/test/kill_all_containers.sh
new file mode 100755
index 000000000..aa7f4255d
--- /dev/null
+++ b/middleware/kubernetes/test/kill_all_containers.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+docker rm -f $(docker ps -a -q)
+sleep 1
+docker rm -f $(docker ps -a -q)
diff --git a/middleware/kubernetes/util/util.go b/middleware/kubernetes/util/util.go
index 7fc03ffc1..259eaf596 100644
--- a/middleware/kubernetes/util/util.go
+++ b/middleware/kubernetes/util/util.go
@@ -1,6 +1,10 @@
// Package kubernetes/util provides helper functions for the kubernetes middleware
package util
+import (
+ "strings"
+)
+
// StringInSlice check whether string a is a member of slice.
func StringInSlice(a string, slice []string) bool {
for _, b := range slice {
@@ -10,3 +14,13 @@ func StringInSlice(a string, slice []string) bool {
}
return false
}
+
+// SymbolContainsWildcard checks whether symbol contains a wildcard value
+func SymbolContainsWildcard(symbol string) bool {
+ return (strings.Contains(symbol, WildcardStar) || (symbol == WildcardAny))
+}
+
+const (
+ WildcardStar = "*"
+ WildcardAny = "any"
+)
diff --git a/middleware/kubernetes/util/util_test.go b/middleware/kubernetes/util/util_test.go
index b53b9f3f6..4af64ea50 100644
--- a/middleware/kubernetes/util/util_test.go
+++ b/middleware/kubernetes/util/util_test.go
@@ -31,3 +31,25 @@ func TestStringInSlice(t *testing.T) {
}
}
}
+
+// Test data for TestSymbolContainsWildcard cases.
+var testdataSymbolContainsWildcard = []struct {
+ Symbol string
+ ExpectedResult bool
+}{
+ {"mynamespace", false},
+ {"*", true},
+ {"any", true},
+ {"my*space", true},
+ {"*space", true},
+ {"myname*", true},
+}
+
+func TestSymbolContainsWildcard(t *testing.T) {
+ for _, example := range testdataSymbolContainsWildcard {
+ actualResult := SymbolContainsWildcard(example.Symbol)
+ if actualResult != example.ExpectedResult {
+ t.Errorf("Expected SymbolContainsWildcard result '%v' for example string='%v'. Instead got result '%v'.", example.ExpectedResult, example.Symbol, actualResult)
+ }
+ }
+}
diff --git a/test/kubernetes_test.go b/test/kubernetes_test.go
new file mode 100644
index 000000000..6f6599d41
--- /dev/null
+++ b/test/kubernetes_test.go
@@ -0,0 +1,182 @@
+// +build k8sIntegration
+
+package test
+
+import (
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "testing"
+
+ "github.com/miekg/dns"
+)
+
+// Test data for A records
+var testdataLookupA = []struct {
+ Query string
+ TotalAnswerCount int
+ ARecordCount int
+}{
+ // Matching queries
+ {"mynginx.demo.coredns.local.", 1, 1}, // One A record, should exist
+
+ // Failure queries
+ {"mynginx.test.coredns.local.", 0, 0}, // One A record, is not exposed
+ {"someservicethatdoesnotexist.demo.coredns.local.", 0, 0}, // Record does not exist
+
+ // Namespace wildcards
+ {"mynginx.*.coredns.local.", 1, 1}, // One A record, via wildcard namespace
+ {"mynginx.any.coredns.local.", 1, 1}, // One A record, via wildcard namespace
+ {"someservicethatdoesnotexist.*.coredns.local.", 0, 0}, // Record does not exist with wildcard for namespace
+ {"someservicethatdoesnotexist.any.coredns.local.", 0, 0}, // Record does not exist with wildcard for namespace
+ {"*.demo.coredns.local.", 2, 2}, // Two A records, via wildcard
+ {"any.demo.coredns.local.", 2, 2}, // Two A records, via wildcard
+	{"*.test.coredns.local.", 0, 0},   // A records exist, but namespace "test" is not exposed
+	{"any.test.coredns.local.", 0, 0}, // A records exist, but namespace "test" is not exposed
+ {"*.*.coredns.local.", 2, 2}, // Two A records, via namespace and service wildcard
+}
+
+// Test data for SRV records
+var testdataLookupSRV = []struct {
+ Query string
+ TotalAnswerCount int
+ // ARecordCount int
+ SRVRecordCount int
+}{
+ // Matching queries
+ {"mynginx.demo.coredns.local.", 1, 1}, // One SRV record, should exist
+
+ // Failure queries
+ {"mynginx.test.coredns.local.", 0, 0}, // One SRV record, is not exposed
+ {"someservicethatdoesnotexist.demo.coredns.local.", 0, 0}, // Record does not exist
+
+ // Namespace wildcards
+ {"mynginx.*.coredns.local.", 1, 1}, // One SRV record, via wildcard namespace
+ {"mynginx.any.coredns.local.", 1, 1}, // One SRV record, via wildcard namespace
+ {"someservicethatdoesnotexist.*.coredns.local.", 0, 0}, // Record does not exist with wildcard for namespace
+ {"someservicethatdoesnotexist.any.coredns.local.", 0, 0}, // Record does not exist with wildcard for namespace
+ {"*.demo.coredns.local.", 1, 1}, // One SRV record, via wildcard
+ {"any.demo.coredns.local.", 1, 1}, // One SRV record, via wildcard
+	{"*.test.coredns.local.", 0, 0},   // SRV records exist, but namespace "test" is not exposed
+	{"any.test.coredns.local.", 0, 0}, // SRV records exist, but namespace "test" is not exposed
+ {"*.*.coredns.local.", 1, 1}, // One SRV record, via namespace and service wildcard
+}
+
+// checkKubernetesRunning performs a basic reachability check of the kubernetes API server on localhost:8080.
+func checkKubernetesRunning() bool {
+ _, err := http.Get("http://localhost:8080/api/v1")
+ return err == nil
+}
+
+func TestK8sIntegration(t *testing.T) {
+ t.Log(" === RUN testLookupA")
+ testLookupA(t)
+ t.Log(" === RUN testLookupSRV")
+ testLookupSRV(t)
+}
+
+func testLookupA(t *testing.T) {
+ if !checkKubernetesRunning() {
+ t.Skip("Skipping Kubernetes Integration tests. Kubernetes is not running")
+ }
+
+ // Note: Use different port to avoid conflict with servers used in other tests.
+ coreFile :=
+ `.:2053 {
+ kubernetes coredns.local {
+ endpoint http://localhost:8080
+ namespaces demo
+    }
+}
+`
+
+ server, _, udp, err := Server(t, coreFile)
+ if err != nil {
+		t.Fatalf("Could not get server: %s", err)
+ }
+ defer server.Stop()
+
+ log.SetOutput(ioutil.Discard)
+
+ for _, testData := range testdataLookupA {
+ t.Logf("[log] Testing query string: '%v'\n", testData.Query)
+ dnsClient := new(dns.Client)
+ dnsMessage := new(dns.Msg)
+
+ dnsMessage.SetQuestion(testData.Query, dns.TypeA)
+ dnsMessage.SetEdns0(4096, true)
+
+ res, _, err := dnsClient.Exchange(dnsMessage, udp)
+ if err != nil {
+			t.Fatalf("Could not send query: %s", err)
+ }
+ // Count A records in the answer section
+ ARecordCount := 0
+ for _, a := range res.Answer {
+ if a.Header().Rrtype == dns.TypeA {
+ ARecordCount++
+ }
+ }
+
+ if ARecordCount != testData.ARecordCount {
+ t.Errorf("Expected '%v' A records in response. Instead got '%v' A records. Test query string: '%v'", testData.ARecordCount, ARecordCount, testData.Query)
+ }
+ if len(res.Answer) != testData.TotalAnswerCount {
+ t.Errorf("Expected '%v' records in answer section. Instead got '%v' records in answer section. Test query string: '%v'", testData.TotalAnswerCount, len(res.Answer), testData.Query)
+ }
+ }
+}
+
+func testLookupSRV(t *testing.T) {
+ if !checkKubernetesRunning() {
+ t.Skip("Skipping Kubernetes Integration tests. Kubernetes is not running")
+ }
+
+ // Note: Use different port to avoid conflict with servers used in other tests.
+ coreFile :=
+ `.:2054 {
+ kubernetes coredns.local {
+ endpoint http://localhost:8080
+ namespaces demo
+    }
+}
+`
+
+ server, _, udp, err := Server(t, coreFile)
+ if err != nil {
+		t.Fatalf("Could not get server: %s", err)
+ }
+ defer server.Stop()
+
+ log.SetOutput(ioutil.Discard)
+
+ // TODO: Add checks for A records in additional section
+
+ for _, testData := range testdataLookupSRV {
+ t.Logf("[log] Testing query string: '%v'\n", testData.Query)
+ dnsClient := new(dns.Client)
+ dnsMessage := new(dns.Msg)
+
+ dnsMessage.SetQuestion(testData.Query, dns.TypeSRV)
+ dnsMessage.SetEdns0(4096, true)
+
+ res, _, err := dnsClient.Exchange(dnsMessage, udp)
+ if err != nil {
+			t.Fatalf("Could not send query: %s", err)
+ }
+ // Count SRV records in the answer section
+ srvRecordCount := 0
+ for _, a := range res.Answer {
+ fmt.Printf("RR: %v\n", a)
+ if a.Header().Rrtype == dns.TypeSRV {
+ srvRecordCount++
+ }
+ }
+
+ if srvRecordCount != testData.SRVRecordCount {
+ t.Errorf("Expected '%v' SRV records in response. Instead got '%v' SRV records. Test query string: '%v'", testData.SRVRecordCount, srvRecordCount, testData.Query)
+ }
+ if len(res.Answer) != testData.TotalAnswerCount {
+ t.Errorf("Expected '%v' records in answer section. Instead got '%v' records in answer section. Test query string: '%v'", testData.TotalAnswerCount, len(res.Answer), testData.Query)
+ }
+ }
+}