path: root/plugin/proxy/lookup.go
package proxy

// Functions other plugins might want to use to do lookups in the same style as the proxy.

import (
	"context"
	"fmt"
	"net"
	"sync/atomic"
	"time"

	"github.com/coredns/coredns/plugin/pkg/healthcheck"
	"github.com/coredns/coredns/request"

	"github.com/miekg/dns"
)

// NewLookup creates a new proxy with the hosts in hosts and a Random policy.
func NewLookup(hosts []string) Proxy { return NewLookupWithOption(hosts, Options{}) }

// NewLookupWithOption creates a simple round robin forwarder with a potentially forced proto for the upstream.
func NewLookupWithOption(hosts []string, opts Options) Proxy {
	p := Proxy{Next: nil}

	// TODO(miek): this needs to be unified with upstream.go's NewStaticUpstreams, caddy uses NewHost
	// we should copy/make something similar.
	upstream := &staticUpstream{
		from: ".",
		HealthCheck: healthcheck.HealthCheck{
			FailTimeout: 5 * time.Second,
			MaxFails:    3,
		},
		ex: newDNSExWithOption(opts),
	}
	upstream.Hosts = make([]*healthcheck.UpstreamHost, len(hosts))

	for i, host := range hosts {
		uh := &healthcheck.UpstreamHost{
			Name:        host,
			FailTimeout: upstream.FailTimeout,
			CheckDown:   checkDownFunc(upstream),
		}

		upstream.Hosts[i] = uh
	}
	p.Upstreams = &[]Upstream{upstream}
	return p
}
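// A minimal construction sketch, written as a caller outside this package
// would use it. The upstream addresses are placeholders, and ForceTCP is
// assumed to be the Options field that forces the upstream proto:
//
//	up := proxy.NewLookup([]string{"8.8.8.8:53", "8.8.4.4:53"})
//	tcp := proxy.NewLookupWithOption([]string{"8.8.8.8:53"}, proxy.Options{ForceTCP: true})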

// Lookup uses name and type to forge a new message and sends it upstream. It
// sets any EDNS0 options correctly so that the downstream server can process the reply.
func (p Proxy) Lookup(state request.Request, name string, typ uint16) (*dns.Msg, error) {
	req := new(dns.Msg)
	req.SetQuestion(name, typ)
	state.SizeAndDo(req)

	state2 := request.Request{W: state.W, Req: req}

	return p.lookup(state2)
}
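// A hedged usage sketch: another plugin's ServeDNS might resolve an A record
// through this proxy as follows (up is a Proxy from NewLookup; w and r are
// the handler's dns.ResponseWriter and *dns.Msg; error handling elided):
//
//	state := request.Request{W: w, Req: r}
//	reply, err := up.Lookup(state, "example.org.", dns.TypeA)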

// Forward forwards the request in state as-is. Unlike Lookup, it does not set any EDNS0 options on the message.
func (p Proxy) Forward(state request.Request) (*dns.Msg, error) {
	return p.lookup(state)
}
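// Forward is the variant to reach for when the caller already holds a fully
// formed query and wants it relayed unchanged, e.g. (a sketch under the same
// assumptions as the Lookup example above):
//
//	reply, err := up.Forward(request.Request{W: w, Req: r})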

func (p Proxy) lookup(state request.Request) (*dns.Msg, error) {
	upstream := p.match(state)
	if upstream == nil {
		return nil, errInvalidDomain
	}
	for {
		start := time.Now()
		reply := new(dns.Msg)
		var backendErr error

		// Since Select() should give us "up" hosts, keep retrying
		// hosts until timeout (or until we get a nil host).
		for time.Since(start) < tryDuration {
			host := upstream.Select()
			if host == nil {
				return nil, fmt.Errorf("%s: %s", errUnreachable, "no upstream host")
			}

			// Duplicated from proxy.go, but with a twist: we don't write the
			// reply back to the client, we return it, and there is no monitoring to update here.

			atomic.AddInt64(&host.Conns, 1)

			reply, backendErr = upstream.Exchanger().Exchange(context.TODO(), host.Name, state)

			atomic.AddInt64(&host.Conns, -1)

			if backendErr == nil {

				if !state.Match(reply) {
					return state.ErrorMessage(dns.RcodeFormatError), nil
				}

				return reply, nil
			}

			if oe, ok := backendErr.(*net.OpError); ok {
				if oe.Timeout() { // see proxy.go for docs.
					continue
				}
			}

			timeout := host.FailTimeout
			if timeout == 0 {
				timeout = defaultFailTimeout
			}

			atomic.AddInt32(&host.Fails, 1)
			fails := atomic.LoadInt32(&host.Fails)

			go func(host *healthcheck.UpstreamHost, timeout time.Duration) {
				time.Sleep(timeout)
				atomic.AddInt32(&host.Fails, -1)
				if fails%failureCheck == 0 { // Kick off a healthcheck on every third failure.
					host.HealthCheckURL()
				}
			}(host, timeout)
		}
		return nil, fmt.Errorf("%s: %s", errUnreachable, backendErr)
	}
}
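
// The failure accounting above follows a timed-decay pattern: a failed
// exchange bumps the host's fail counter right away and schedules a matching
// decrement after FailTimeout, so a burst of errors only marks the host down
// temporarily (the CheckDown function treats a host as down once Fails
// reaches MaxFails). A self-contained sketch of the same idea; the names
// below are illustrative, not part of this package:
//
//	var fails int32
//
//	func markFailed(timeout time.Duration) {
//		atomic.AddInt32(&fails, 1)
//		go func() {
//			time.Sleep(timeout)
//			atomic.AddInt32(&fails, -1)
//		}()
//	}
//
//	func isDown(maxFails int32) bool {
//		return atomic.LoadInt32(&fails) >= maxFails
//	}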