author    Yong Tang <yong.tang.github@outlook.com> 2018-01-15 09:59:29 -0800
committer GitHub <noreply@github.com> 2018-01-15 09:59:29 -0800
commit    584dd87c70e29abc373f88be52bd2eee287ecace (patch)
tree      6b4ac5286a5345c796071e4e9f7a9e6fce47a5ca /vendor/google.golang.org/grpc/grpclb.go
parent    d699b89063843d81cee35f128aaef9881439151f (diff)
Add route53 plugin (#1390)
* Update vendor

Signed-off-by: Yong Tang <yong.tang.github@outlook.com>

* Add route53 plugin

This fix adds the route53 plugin so that it is possible to query route53 records through CoreDNS.

Signed-off-by: Yong Tang <yong.tang.github@outlook.com>
Diffstat (limited to 'vendor/google.golang.org/grpc/grpclb.go')
-rw-r--r--  vendor/google.golang.org/grpc/grpclb.go | 820
1 file changed, 229 insertions(+), 591 deletions(-)
diff --git a/vendor/google.golang.org/grpc/grpclb.go b/vendor/google.golang.org/grpc/grpclb.go
index db56ff362..d14a5d409 100644
--- a/vendor/google.golang.org/grpc/grpclb.go
+++ b/vendor/google.golang.org/grpc/grpclb.go
@@ -19,21 +19,32 @@
package grpc
import (
- "errors"
- "fmt"
- "math/rand"
- "net"
+ "strconv"
+ "strings"
"sync"
"time"
"golang.org/x/net/context"
- "google.golang.org/grpc/codes"
- lbmpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/connectivity"
+ lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
"google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/naming"
+ "google.golang.org/grpc/resolver"
)
+const (
+ lbTokeyKey = "lb-token"
+ defaultFallbackTimeout = 10 * time.Second
+ grpclbName = "grpclb"
+)
+
+func convertDuration(d *lbpb.Duration) time.Duration {
+ if d == nil {
+ return 0
+ }
+ return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond
+}
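As an illustrative aside, not part of the vendored file: convertDuration above flattens a protobuf-style duration into a time.Duration by summing the seconds and nanos fields, treating nil as zero. A minimal standalone sketch of the same arithmetic, with a stand-in struct and invented values:

package main

import (
	"fmt"
	"time"
)

// pbDuration is a stand-in for lbpb.Duration, carrying the same two fields.
type pbDuration struct {
	Seconds int64
	Nanos   int32
}

// convert mirrors convertDuration above.
func convert(d *pbDuration) time.Duration {
	if d == nil {
		return 0 // a nil protobuf duration maps to zero
	}
	return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond
}

func main() {
	fmt.Println(convert(&pbDuration{Seconds: 1, Nanos: 500000000})) // 1.5s
	fmt.Println(convert(nil))                                       // 0s
}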
+
// Client API for LoadBalancer service.
// Mostly copied from generated pb.go file.
// To avoid circular dependency.
@@ -59,646 +70,273 @@ type balanceLoadClientStream struct {
ClientStream
}
-func (x *balanceLoadClientStream) Send(m *lbmpb.LoadBalanceRequest) error {
+func (x *balanceLoadClientStream) Send(m *lbpb.LoadBalanceRequest) error {
return x.ClientStream.SendMsg(m)
}
-func (x *balanceLoadClientStream) Recv() (*lbmpb.LoadBalanceResponse, error) {
- m := new(lbmpb.LoadBalanceResponse)
+func (x *balanceLoadClientStream) Recv() (*lbpb.LoadBalanceResponse, error) {
+ m := new(lbpb.LoadBalanceResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
-// NewGRPCLBBalancer creates a grpclb load balancer.
-func NewGRPCLBBalancer(r naming.Resolver) Balancer {
- return &grpclbBalancer{
- r: r,
- }
+func init() {
+ balancer.Register(newLBBuilder())
}
-type remoteBalancerInfo struct {
- addr string
- // the server name used for authentication with the remote LB server.
- name string
+// newLBBuilder creates a builder for grpclb.
+func newLBBuilder() balancer.Builder {
+ return NewLBBuilderWithFallbackTimeout(defaultFallbackTimeout)
}
-// grpclbAddrInfo consists of the information of a backend server.
-type grpclbAddrInfo struct {
- addr Address
- connected bool
- // dropForRateLimiting indicates whether this particular request should be
- // dropped by the client for rate limiting.
- dropForRateLimiting bool
- // dropForLoadBalancing indicates whether this particular request should be
- // dropped by the client for load balancing.
- dropForLoadBalancing bool
+// NewLBBuilderWithFallbackTimeout creates a grpclb builder with the given
+// fallbackTimeout. If no response is received from the remote balancer within
+// fallbackTimeout, the backend addresses from the resolved address list will be
+// used.
+//
+// Only call this function when a non-default fallback timeout is needed.
+func NewLBBuilderWithFallbackTimeout(fallbackTimeout time.Duration) balancer.Builder {
+ return &lbBuilder{
+ fallbackTimeout: fallbackTimeout,
+ }
}
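A hedged usage sketch, not taken from this diff: a caller that needs a non-default fallback window can register the builder returned by the exported constructor above (the 20s value is arbitrary; balancer.Register keys builders by Name(), so a later registration with the same name wins):

package main

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/balancer"
)

func main() {
	// Re-register grpclb with a 20s fallback timeout instead of the
	// 10s default installed by this file's init.
	balancer.Register(grpc.NewLBBuilderWithFallbackTimeout(20 * time.Second))
}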
-type grpclbBalancer struct {
- r naming.Resolver
- target string
- mu sync.Mutex
- seq int // a sequence number to make sure addrCh does not get stale addresses.
- w naming.Watcher
- addrCh chan []Address
- rbs []remoteBalancerInfo
- addrs []*grpclbAddrInfo
- next int
- waitCh chan struct{}
- done bool
- rand *rand.Rand
-
- clientStats lbmpb.ClientStats
+type lbBuilder struct {
+ fallbackTimeout time.Duration
}
-func (b *grpclbBalancer) watchAddrUpdates(w naming.Watcher, ch chan []remoteBalancerInfo) error {
- updates, err := w.Next()
- if err != nil {
- grpclog.Warningf("grpclb: failed to get next addr update from watcher: %v", err)
- return err
- }
- b.mu.Lock()
- defer b.mu.Unlock()
- if b.done {
- return ErrClientConnClosing
- }
- for _, update := range updates {
- switch update.Op {
- case naming.Add:
- var exist bool
- for _, v := range b.rbs {
- // TODO: Is the same addr with different server name a different balancer?
- if update.Addr == v.addr {
- exist = true
- break
- }
- }
- if exist {
- continue
- }
- md, ok := update.Metadata.(*naming.AddrMetadataGRPCLB)
- if !ok {
- // TODO: Revisit the handling here and may introduce some fallback mechanism.
- grpclog.Errorf("The name resolution contains unexpected metadata %v", update.Metadata)
- continue
- }
- switch md.AddrType {
- case naming.Backend:
- // TODO: Revisit the handling here and may introduce some fallback mechanism.
- grpclog.Errorf("The name resolution does not give grpclb addresses")
- continue
- case naming.GRPCLB:
- b.rbs = append(b.rbs, remoteBalancerInfo{
- addr: update.Addr,
- name: md.ServerName,
- })
- default:
- grpclog.Errorf("Received unknow address type %d", md.AddrType)
- continue
- }
- case naming.Delete:
- for i, v := range b.rbs {
- if update.Addr == v.addr {
- copy(b.rbs[i:], b.rbs[i+1:])
- b.rbs = b.rbs[:len(b.rbs)-1]
- break
- }
- }
- default:
- grpclog.Errorf("Unknown update.Op %v", update.Op)
- }
+func (b *lbBuilder) Name() string {
+ return grpclbName
+}
+
+func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
+ // This generates a manual resolver builder with a random scheme. This
+ // scheme will be used to dial the remote LB, so we can send filtered address
+ // updates to the remote LB ClientConn using this manual resolver.
+ scheme := "grpclb_internal_" + strconv.FormatInt(time.Now().UnixNano(), 36)
+ r := &lbManualResolver{scheme: scheme, ccb: cc}
+
+ var target string
+ targetSplitted := strings.Split(cc.Target(), ":///")
+ if len(targetSplitted) < 2 {
+ target = cc.Target()
+ } else {
+ target = targetSplitted[1]
}
- // TODO: Fall back to the basic round-robin load balancing if the resulting address is
- // not a load balancer.
- select {
- case <-ch:
- default:
+
+ lb := &lbBalancer{
+ cc: cc,
+ target: target,
+ opt: opt,
+ fallbackTimeout: b.fallbackTimeout,
+ doneCh: make(chan struct{}),
+
+ manualResolver: r,
+ csEvltr: &connectivityStateEvaluator{},
+ subConns: make(map[resolver.Address]balancer.SubConn),
+ scStates: make(map[balancer.SubConn]connectivity.State),
+ picker: &errPicker{err: balancer.ErrNoSubConnAvailable},
+ clientStats: &rpcStats{},
}
- ch <- b.rbs
- return nil
+
+ return lb
}
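Build above strips a URI-style scheme from cc.Target() before handing the bare endpoint to the remote balancer. A standalone sketch of that parsing (stripScheme and the example targets are illustrative, not names from this diff):

package main

import (
	"fmt"
	"strings"
)

// stripScheme mirrors the logic in Build above: "dns:///foo:443" becomes
// "foo:443", while a bare "foo:443" is passed through unchanged.
func stripScheme(target string) string {
	parts := strings.Split(target, ":///")
	if len(parts) < 2 {
		return target
	}
	return parts[1]
}

func main() {
	fmt.Println(stripScheme("dns:///lb.example.com:443")) // lb.example.com:443
	fmt.Println(stripScheme("lb.example.com:443"))        // lb.example.com:443
}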
-func convertDuration(d *lbmpb.Duration) time.Duration {
- if d == nil {
- return 0
- }
- return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond
+type lbBalancer struct {
+ cc balancer.ClientConn
+ target string
+ opt balancer.BuildOptions
+ fallbackTimeout time.Duration
+ doneCh chan struct{}
+
+ // manualResolver is used in the remote LB ClientConn inside grpclb. When
+ // resolved address updates are received by grpclb, filtered updates will be
+ // sent to the remote LB ClientConn through this resolver.
+ manualResolver *lbManualResolver
+ // The ClientConn to talk to the remote balancer.
+ ccRemoteLB *ClientConn
+
+ // Support client side load reporting. Each picker gets a reference to this,
+ // and will update its content.
+ clientStats *rpcStats
+
+ mu sync.Mutex // guards everything following.
+ // The full server list including drops, used to check if the newly received
+ // serverList contains anything new. Each generated picker will also have
+ // a reference to this list to do the first layer pick.
+ fullServerList []*lbpb.Server
+ // All backend addresses, with metadata set to nil. This list contains all
+ // backend addresses in the same order and with the same duplicates as in
+ // serverlist. When generating picker, a SubConn slice with the same order
+ // but with only READY SCs will be generated.
+ backendAddrs []resolver.Address
+ // Roundrobin functionalities.
+ csEvltr *connectivityStateEvaluator
+ state connectivity.State
+ subConns map[resolver.Address]balancer.SubConn // Used to new/remove SubConn.
+ scStates map[balancer.SubConn]connectivity.State // Used to filter READY SubConns.
+ picker balancer.Picker
+ // Support fallback to resolved backend addresses if there's no response
+ // from remote balancer within fallbackTimeout.
+ fallbackTimerExpired bool
+ serverListReceived bool
+ // resolvedBackendAddrs is resolvedAddrs minus remote balancers. It's set
+ // when resolved address updates are received, and read in the goroutine
+ // handling fallback.
+ resolvedBackendAddrs []resolver.Address
}
-func (b *grpclbBalancer) processServerList(l *lbmpb.ServerList, seq int) {
- if l == nil {
+// regeneratePicker takes a snapshot of the balancer, and generates a picker from
+// it. The picker
+// - always returns ErrTransientFailure if the balancer is in TransientFailure,
+ // - does a two-layer roundrobin pick otherwise.
+// Caller must hold lb.mu.
+func (lb *lbBalancer) regeneratePicker() {
+ if lb.state == connectivity.TransientFailure {
+ lb.picker = &errPicker{err: balancer.ErrTransientFailure}
return
}
- servers := l.GetServers()
- var (
- sl []*grpclbAddrInfo
- addrs []Address
- )
- for _, s := range servers {
- md := metadata.Pairs("lb-token", s.LoadBalanceToken)
- ip := net.IP(s.IpAddress)
- ipStr := ip.String()
- if ip.To4() == nil {
- // Add square brackets to ipv6 addresses, otherwise net.Dial() and
- // net.SplitHostPort() will return too many colons error.
- ipStr = fmt.Sprintf("[%s]", ipStr)
- }
- addr := Address{
- Addr: fmt.Sprintf("%s:%d", ipStr, s.Port),
- Metadata: &md,
+ var readySCs []balancer.SubConn
+ for _, a := range lb.backendAddrs {
+ if sc, ok := lb.subConns[a]; ok {
+ if st, ok := lb.scStates[sc]; ok && st == connectivity.Ready {
+ readySCs = append(readySCs, sc)
+ }
}
- sl = append(sl, &grpclbAddrInfo{
- addr: addr,
- dropForRateLimiting: s.DropForRateLimiting,
- dropForLoadBalancing: s.DropForLoadBalancing,
- })
- addrs = append(addrs, addr)
}
- b.mu.Lock()
- defer b.mu.Unlock()
- if b.done || seq < b.seq {
- return
- }
- if len(sl) > 0 {
- // reset b.next to 0 when replacing the server list.
- b.next = 0
- b.addrs = sl
- b.addrCh <- addrs
- }
- return
-}
-func (b *grpclbBalancer) sendLoadReport(s *balanceLoadClientStream, interval time.Duration, done <-chan struct{}) {
- ticker := time.NewTicker(interval)
- defer ticker.Stop()
- for {
- select {
- case <-ticker.C:
- case <-done:
- return
- }
- b.mu.Lock()
- stats := b.clientStats
- b.clientStats = lbmpb.ClientStats{} // Clear the stats.
- b.mu.Unlock()
- t := time.Now()
- stats.Timestamp = &lbmpb.Timestamp{
- Seconds: t.Unix(),
- Nanos: int32(t.Nanosecond()),
- }
- if err := s.Send(&lbmpb.LoadBalanceRequest{
- LoadBalanceRequestType: &lbmpb.LoadBalanceRequest_ClientStats{
- ClientStats: &stats,
- },
- }); err != nil {
- grpclog.Errorf("grpclb: failed to send load report: %v", err)
+ if len(lb.fullServerList) <= 0 {
+ if len(readySCs) <= 0 {
+ lb.picker = &errPicker{err: balancer.ErrNoSubConnAvailable}
return
}
- }
-}
-
-func (b *grpclbBalancer) callRemoteBalancer(lbc *loadBalancerClient, seq int) (retry bool) {
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
- stream, err := lbc.BalanceLoad(ctx)
- if err != nil {
- grpclog.Errorf("grpclb: failed to perform RPC to the remote balancer %v", err)
+ lb.picker = &rrPicker{subConns: readySCs}
return
}
- b.mu.Lock()
- if b.done {
- b.mu.Unlock()
- return
- }
- b.mu.Unlock()
- initReq := &lbmpb.LoadBalanceRequest{
- LoadBalanceRequestType: &lbmpb.LoadBalanceRequest_InitialRequest{
- InitialRequest: &lbmpb.InitialLoadBalanceRequest{
- Name: b.target,
- },
- },
+ lb.picker = &lbPicker{
+ serverList: lb.fullServerList,
+ subConns: readySCs,
+ stats: lb.clientStats,
}
- if err := stream.Send(initReq); err != nil {
- grpclog.Errorf("grpclb: failed to send init request: %v", err)
- // TODO: backoff on retry?
- return true
- }
- reply, err := stream.Recv()
- if err != nil {
- grpclog.Errorf("grpclb: failed to recv init response: %v", err)
- // TODO: backoff on retry?
- return true
- }
- initResp := reply.GetInitialResponse()
- if initResp == nil {
- grpclog.Errorf("grpclb: reply from remote balancer did not include initial response.")
- return
- }
- // TODO: Support delegation.
- if initResp.LoadBalancerDelegate != "" {
- // delegation
- grpclog.Errorf("TODO: Delegation is not supported yet.")
- return
- }
- streamDone := make(chan struct{})
- defer close(streamDone)
- b.mu.Lock()
- b.clientStats = lbmpb.ClientStats{} // Clear client stats.
- b.mu.Unlock()
- if d := convertDuration(initResp.ClientStatsReportInterval); d > 0 {
- go b.sendLoadReport(stream, d, streamDone)
- }
- // Retrieve the server list.
- for {
- reply, err := stream.Recv()
- if err != nil {
- grpclog.Errorf("grpclb: failed to recv server list: %v", err)
- break
- }
- b.mu.Lock()
- if b.done || seq < b.seq {
- b.mu.Unlock()
- return
- }
- b.seq++ // tick when receiving a new list of servers.
- seq = b.seq
- b.mu.Unlock()
- if serverList := reply.GetServerList(); serverList != nil {
- b.processServerList(serverList, seq)
- }
- }
- return true
+ return
}
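When fullServerList is empty, the picker generated above degrades to plain round-robin over READY connections. An illustrative round-robin sketch under that assumption (the type below is a stand-in, not gRPC's rrPicker):

package main

import (
	"fmt"
	"sync"
)

// rrPicker cycles over an immutable slice, analogous to the fallback path
// taken above; strings stand in for balancer.SubConn values.
type rrPicker struct {
	mu       sync.Mutex
	next     int
	subConns []string
}

func (p *rrPicker) pick() string {
	p.mu.Lock()
	defer p.mu.Unlock()
	sc := p.subConns[p.next]
	p.next = (p.next + 1) % len(p.subConns)
	return sc
}

func main() {
	p := &rrPicker{subConns: []string{"a", "b", "c"}}
	for i := 0; i < 5; i++ {
		fmt.Println(p.pick()) // a b c a b
	}
}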
-func (b *grpclbBalancer) Start(target string, config BalancerConfig) error {
- b.rand = rand.New(rand.NewSource(time.Now().Unix()))
- // TODO: Fall back to the basic direct connection if there is no name resolver.
- if b.r == nil {
- return errors.New("there is no name resolver installed")
+func (lb *lbBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
+ grpclog.Infof("lbBalancer: handle SubConn state change: %p, %v", sc, s)
+ lb.mu.Lock()
+ defer lb.mu.Unlock()
+
+ oldS, ok := lb.scStates[sc]
+ if !ok {
+ grpclog.Infof("lbBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
+ return
}
- b.target = target
- b.mu.Lock()
- if b.done {
- b.mu.Unlock()
- return ErrClientConnClosing
+ lb.scStates[sc] = s
+ switch s {
+ case connectivity.Idle:
+ sc.Connect()
+ case connectivity.Shutdown:
+ // When an address was removed by the resolver, the balancer called RemoveSubConn
+ // but kept the sc's state in scStates. Remove the state for this sc here.
+ delete(lb.scStates, sc)
}
- b.addrCh = make(chan []Address)
- w, err := b.r.Resolve(target)
- if err != nil {
- b.mu.Unlock()
- grpclog.Errorf("grpclb: failed to resolve address: %v, err: %v", target, err)
- return err
- }
- b.w = w
- b.mu.Unlock()
- balancerAddrsCh := make(chan []remoteBalancerInfo, 1)
- // Spawn a goroutine to monitor the name resolution of remote load balancer.
- go func() {
- for {
- if err := b.watchAddrUpdates(w, balancerAddrsCh); err != nil {
- grpclog.Warningf("grpclb: the naming watcher stops working due to %v.\n", err)
- close(balancerAddrsCh)
- return
- }
- }
- }()
- // Spawn a goroutine to talk to the remote load balancer.
- go func() {
- var (
- cc *ClientConn
- // ccError is closed when there is an error in the current cc.
- // A new rb should be picked from rbs and connected.
- ccError chan struct{}
- rb *remoteBalancerInfo
- rbs []remoteBalancerInfo
- rbIdx int
- )
-
- defer func() {
- if ccError != nil {
- select {
- case <-ccError:
- default:
- close(ccError)
- }
- }
- if cc != nil {
- cc.Close()
- }
- }()
-
- for {
- var ok bool
- select {
- case rbs, ok = <-balancerAddrsCh:
- if !ok {
- return
- }
- foundIdx := -1
- if rb != nil {
- for i, trb := range rbs {
- if trb == *rb {
- foundIdx = i
- break
- }
- }
- }
- if foundIdx >= 0 {
- if foundIdx >= 1 {
- // Move the address in use to the beginning of the list.
- b.rbs[0], b.rbs[foundIdx] = b.rbs[foundIdx], b.rbs[0]
- rbIdx = 0
- }
- continue // If found, don't dial new cc.
- } else if len(rbs) > 0 {
- // Pick a random one from the list, instead of always using the first one.
- if l := len(rbs); l > 1 && rb != nil {
- tmpIdx := b.rand.Intn(l - 1)
- b.rbs[0], b.rbs[tmpIdx] = b.rbs[tmpIdx], b.rbs[0]
- }
- rbIdx = 0
- rb = &rbs[0]
- } else {
- // foundIdx < 0 && len(rbs) <= 0.
- rb = nil
- }
- case <-ccError:
- ccError = nil
- if rbIdx < len(rbs)-1 {
- rbIdx++
- rb = &rbs[rbIdx]
- } else {
- rb = nil
- }
- }
-
- if rb == nil {
- continue
- }
- if cc != nil {
- cc.Close()
- }
- // Talk to the remote load balancer to get the server list.
- var (
- err error
- dopts []DialOption
- )
- if creds := config.DialCreds; creds != nil {
- if rb.name != "" {
- if err := creds.OverrideServerName(rb.name); err != nil {
- grpclog.Warningf("grpclb: failed to override the server name in the credentials: %v", err)
- continue
- }
- }
- dopts = append(dopts, WithTransportCredentials(creds))
- } else {
- dopts = append(dopts, WithInsecure())
- }
- if dialer := config.Dialer; dialer != nil {
- // WithDialer takes a different type of function, so we instead use a special DialOption here.
- dopts = append(dopts, func(o *dialOptions) { o.copts.Dialer = dialer })
- }
- dopts = append(dopts, WithBlock())
- ccError = make(chan struct{})
- ctx, cancel := context.WithTimeout(context.Background(), time.Second)
- cc, err = DialContext(ctx, rb.addr, dopts...)
- cancel()
- if err != nil {
- grpclog.Warningf("grpclb: failed to setup a connection to the remote balancer %v: %v", rb.addr, err)
- close(ccError)
- continue
- }
- b.mu.Lock()
- b.seq++ // tick when getting a new balancer address
- seq := b.seq
- b.next = 0
- b.mu.Unlock()
- go func(cc *ClientConn, ccError chan struct{}) {
- lbc := &loadBalancerClient{cc}
- b.callRemoteBalancer(lbc, seq)
- cc.Close()
- select {
- case <-ccError:
- default:
- close(ccError)
- }
- }(cc, ccError)
- }
- }()
- return nil
-}
+ oldAggrState := lb.state
+ lb.state = lb.csEvltr.recordTransition(oldS, s)
-func (b *grpclbBalancer) down(addr Address, err error) {
- b.mu.Lock()
- defer b.mu.Unlock()
- for _, a := range b.addrs {
- if addr == a.addr {
- a.connected = false
- break
- }
+ // Regenerate picker when one of the following happens:
+ // - this sc became ready from not-ready
+ // - this sc became not-ready from ready
+ // - the aggregated state of balancer became TransientFailure from non-TransientFailure
+ // - the aggregated state of balancer became non-TransientFailure from TransientFailure
+ if (oldS == connectivity.Ready) != (s == connectivity.Ready) ||
+ (lb.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) {
+ lb.regeneratePicker()
}
+
+ lb.cc.UpdateBalancerState(lb.state, lb.picker)
+ return
}
-func (b *grpclbBalancer) Up(addr Address) func(error) {
- b.mu.Lock()
- defer b.mu.Unlock()
- if b.done {
- return nil
- }
- var cnt int
- for _, a := range b.addrs {
- if a.addr == addr {
- if a.connected {
- return nil
- }
- a.connected = true
- }
- if a.connected && !a.dropForRateLimiting && !a.dropForLoadBalancing {
- cnt++
- }
- }
- // addr is the only one which is connected. Notify the Get() callers who are blocking.
- if cnt == 1 && b.waitCh != nil {
- close(b.waitCh)
- b.waitCh = nil
+// fallbackToBackendsAfter blocks for fallbackTimeout and falls back to use
+// resolved backends (backends received from resolver, not from remote balancer)
+// if no connection to remote balancers was successful.
+func (lb *lbBalancer) fallbackToBackendsAfter(fallbackTimeout time.Duration) {
+ timer := time.NewTimer(fallbackTimeout)
+ defer timer.Stop()
+ select {
+ case <-timer.C:
+ case <-lb.doneCh:
+ return
}
- return func(err error) {
- b.down(addr, err)
+ lb.mu.Lock()
+ if lb.serverListReceived {
+ lb.mu.Unlock()
+ return
}
+ lb.fallbackTimerExpired = true
+ lb.refreshSubConns(lb.resolvedBackendAddrs)
+ lb.mu.Unlock()
}
-func (b *grpclbBalancer) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) {
- var ch chan struct{}
- b.mu.Lock()
- if b.done {
- b.mu.Unlock()
- err = ErrClientConnClosing
+// HandleResolvedAddrs sends the updated remoteLB addresses to remoteLB
+// clientConn. The remoteLB clientConn will handle creating/removing remoteLB
+// connections.
+func (lb *lbBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
+ grpclog.Infof("lbBalancer: handleResolvedResult: %+v", addrs)
+ if len(addrs) <= 0 {
return
}
- seq := b.seq
- defer func() {
- if err != nil {
- return
- }
- put = func() {
- s, ok := rpcInfoFromContext(ctx)
- if !ok {
- return
- }
- b.mu.Lock()
- defer b.mu.Unlock()
- if b.done || seq < b.seq {
- return
- }
- b.clientStats.NumCallsFinished++
- if !s.bytesSent {
- b.clientStats.NumCallsFinishedWithClientFailedToSend++
- } else if s.bytesReceived {
- b.clientStats.NumCallsFinishedKnownReceived++
- }
+ var remoteBalancerAddrs, backendAddrs []resolver.Address
+ for _, a := range addrs {
+ if a.Type == resolver.GRPCLB {
+ remoteBalancerAddrs = append(remoteBalancerAddrs, a)
+ } else {
+ backendAddrs = append(backendAddrs, a)
}
- }()
-
- b.clientStats.NumCallsStarted++
- if len(b.addrs) > 0 {
- if b.next >= len(b.addrs) {
- b.next = 0
- }
- next := b.next
- for {
- a := b.addrs[next]
- next = (next + 1) % len(b.addrs)
- if a.connected {
- if !a.dropForRateLimiting && !a.dropForLoadBalancing {
- addr = a.addr
- b.next = next
- b.mu.Unlock()
- return
- }
- if !opts.BlockingWait {
- b.next = next
- if a.dropForLoadBalancing {
- b.clientStats.NumCallsFinished++
- b.clientStats.NumCallsFinishedWithDropForLoadBalancing++
- } else if a.dropForRateLimiting {
- b.clientStats.NumCallsFinished++
- b.clientStats.NumCallsFinishedWithDropForRateLimiting++
- }
- b.mu.Unlock()
- err = Errorf(codes.Unavailable, "%s drops requests", a.addr.Addr)
- return
- }
- }
- if next == b.next {
- // Has iterated all the possible address but none is connected.
- break
- }
- }
- }
- if !opts.BlockingWait {
- b.clientStats.NumCallsFinished++
- b.clientStats.NumCallsFinishedWithClientFailedToSend++
- b.mu.Unlock()
- err = Errorf(codes.Unavailable, "there is no address available")
- return
}
- // Wait on b.waitCh for non-failfast RPCs.
- if b.waitCh == nil {
- ch = make(chan struct{})
- b.waitCh = ch
- } else {
- ch = b.waitCh
- }
- b.mu.Unlock()
- for {
- select {
- case <-ctx.Done():
- b.mu.Lock()
- b.clientStats.NumCallsFinished++
- b.clientStats.NumCallsFinishedWithClientFailedToSend++
- b.mu.Unlock()
- err = ctx.Err()
- return
- case <-ch:
- b.mu.Lock()
- if b.done {
- b.clientStats.NumCallsFinished++
- b.clientStats.NumCallsFinishedWithClientFailedToSend++
- b.mu.Unlock()
- err = ErrClientConnClosing
- return
- }
- if len(b.addrs) > 0 {
- if b.next >= len(b.addrs) {
- b.next = 0
- }
- next := b.next
- for {
- a := b.addrs[next]
- next = (next + 1) % len(b.addrs)
- if a.connected {
- if !a.dropForRateLimiting && !a.dropForLoadBalancing {
- addr = a.addr
- b.next = next
- b.mu.Unlock()
- return
- }
- if !opts.BlockingWait {
- b.next = next
- if a.dropForLoadBalancing {
- b.clientStats.NumCallsFinished++
- b.clientStats.NumCallsFinishedWithDropForLoadBalancing++
- } else if a.dropForRateLimiting {
- b.clientStats.NumCallsFinished++
- b.clientStats.NumCallsFinishedWithDropForRateLimiting++
- }
- b.mu.Unlock()
- err = Errorf(codes.Unavailable, "drop requests for the addreess %s", a.addr.Addr)
- return
- }
- }
- if next == b.next {
- // Has iterated all the possible address but none is connected.
- break
- }
- }
- }
- // The newly added addr got removed by Down() again.
- if b.waitCh == nil {
- ch = make(chan struct{})
- b.waitCh = ch
- } else {
- ch = b.waitCh
- }
- b.mu.Unlock()
+ if lb.ccRemoteLB == nil {
+ if len(remoteBalancerAddrs) <= 0 {
+ grpclog.Errorf("grpclb: no remote balancer address is available, should never happen")
+ return
}
+ // First time receiving resolved addresses, create a cc to remote
+ // balancers.
+ lb.dialRemoteLB(remoteBalancerAddrs[0].ServerName)
+ // Start the fallback goroutine.
+ go lb.fallbackToBackendsAfter(lb.fallbackTimeout)
}
-}
-func (b *grpclbBalancer) Notify() <-chan []Address {
- return b.addrCh
+ // cc to remote balancers uses lb.manualResolver. Send the updated remote
+ // balancer addresses to it through manualResolver.
+ lb.manualResolver.NewAddress(remoteBalancerAddrs)
+
+ lb.mu.Lock()
+ lb.resolvedBackendAddrs = backendAddrs
+ // If serverListReceived is true, connection to remote balancer was
+ // successful and there's no need to do fallback anymore.
+ // If fallbackTimerExpired is false, fallback hasn't happened yet.
+ if !lb.serverListReceived && lb.fallbackTimerExpired {
+ // This means we received a new list of resolved backends, and we are
+ // still in fallback mode. Need to update the list of backends we are
+ // using to the new list of backends.
+ lb.refreshSubConns(lb.resolvedBackendAddrs)
+ }
+ lb.mu.Unlock()
}
-func (b *grpclbBalancer) Close() error {
- b.mu.Lock()
- defer b.mu.Unlock()
- if b.done {
- return errBalancerClosed
- }
- b.done = true
- if b.waitCh != nil {
- close(b.waitCh)
- }
- if b.addrCh != nil {
- close(b.addrCh)
+func (lb *lbBalancer) Close() {
+ select {
+ case <-lb.doneCh:
+ return
+ default:
}
- if b.w != nil {
- b.w.Close()
+ close(lb.doneCh)
+ if lb.ccRemoteLB != nil {
+ lb.ccRemoteLB.Close()
}
- return nil
}